diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..50a4be7a1572b1ea54a21c8b87acf2b3c22d6bd4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_init_and_validate_test.go @@ -0,0 +1,492 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestSetClassDefaults(t *testing.T) { + logger, _ := test.NewNullLogger() + t.Run("no modules", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Vectorizer: "none", + } + + p := NewProvider(logger, config.Config{}) + p.SetClassDefaults(class) + + assert.Equal(t, &models.Class{Class: "Foo", Vectorizer: "none"}, class, + "the class is not changed") + }) + + t.Run("module is set, but does not have config capability", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Vectorizer: "my-module", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyText2VecModuleNoCapabilities{name: "my-module"}) + p.SetClassDefaults(class) + + assert.Equal(t, &models.Class{Class: "Foo", Vectorizer: 
"my-module"}, class, + "the class is not changed") + }) + + t.Run("without user-provided values", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + Vectorizer: "my-module", + } + expected := &models.Class{ + Class: "Foo", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "some default value", + "per-class-prop-2": "some default value", + }, + }, + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop default value", + "per-prop-2": "prop default value", + }, + }, + }}, + Vectorizer: "my-module", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + assert.Equal(t, expected, class, + "the defaults were set from config") + }) + + t.Run("with some user-provided values", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "overwritten by user", + }, + }, + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop overwritten by user", + }, + }, + }}, + Vectorizer: "my-module", + } + expected := &models.Class{ + Class: "Foo", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "overwritten by user", + "per-class-prop-2": "some default 
value", + }, + }, + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop overwritten by user", + "per-prop-2": "prop default value", + }, + }, + }}, + Vectorizer: "my-module", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + assert.Equal(t, expected, class, + "the defaults were set from config") + }) + + t.Run("named vector, without user-provided values", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + VectorConfig: map[string]models.VectorConfig{ + "vec1": { + Vectorizer: map[string]interface{}{"my-module": map[string]interface{}{}}, + }, + }, + } + expected := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop default value", + "per-prop-2": "prop default value", + }, + }, + }}, + VectorConfig: map[string]models.VectorConfig{ + "vec1": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "some default value", + "per-class-prop-2": "some default value", + }, + }, + }, + }, + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + assert.Equal(t, expected, class, "the defaults were 
set from config") + }) + + t.Run("mixed vector, without user-provided values", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + VectorConfig: map[string]models.VectorConfig{ + "vec1": { + Vectorizer: map[string]interface{}{"my-module": map[string]interface{}{}}, + }, + }, + Vectorizer: "my-module", + VectorIndexConfig: hnsw.NewDefaultUserConfig(), + } + expected := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop default value", + "per-prop-2": "prop default value", + }, + }, + }}, + VectorConfig: map[string]models.VectorConfig{ + "vec1": { + Vectorizer: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "some default value", + "per-class-prop-2": "some default value", + }, + }, + }, + }, + Vectorizer: "my-module", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "some default value", + "per-class-prop-2": "some default value", + }, + }, + VectorIndexConfig: hnsw.NewDefaultUserConfig(), + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + assert.Equal(t, expected, class, "the defaults were set from config") + }) +} + +func TestValidateClass(t *testing.T) { + ctx := context.Background() + logger, _ := test.NewNullLogger() + t.Run("when class has no vectorizer set, it does not check", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: 
schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + Vectorizer: "none", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + validateError: errors.Errorf("if I was used, you'd fail"), + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + assert.Nil(t, p.ValidateClass(ctx, class)) + }) + + t.Run("when vectorizer does not have capability, it skips validation", + func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + Vectorizer: "my-module", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyText2VecModuleNoCapabilities{ + name: "my-module", + }) + p.SetClassDefaults(class) + + assert.Nil(t, p.ValidateClass(ctx, class)) + }) + + t.Run("the module validates if capable and configured", func(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }}, + Vectorizer: "my-module", + } + + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + validateError: errors.Errorf("no can do!"), + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetClassDefaults(class) + + err := p.ValidateClass(ctx, class) + require.NotNil(t, err) + assert.Equal(t, "module 'my-module': no can do!", err.Error()) + }) +} + +func TestSetSinglePropertyDefaults(t *testing.T) { + class := &models.Class{ + Class: "Foo", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "overwritten by user", + }, + }, + Properties: []*models.Property{{ + Name: "Foo", + DataType: 
schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop overwritten by user", + }, + }, + }}, + Vectorizer: "my-module", + } + prop := &models.Property{ + DataType: []string{"boolean"}, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "overwritten by user", + }, + }, + Name: "newProp", + } + expected := &models.Property{ + DataType: []string{"boolean"}, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "overwritten by user", + "per-prop-2": "prop default value", + }, + }, + Name: "newProp", + } + + logger, _ := test.NewNullLogger() + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.SetSinglePropertyDefaults(class, prop) + + assert.Equal(t, expected, prop, + "user specified module config is used, for rest the default value is used") +} + +func TestSetSinglePropertyDefaults_MixedVectors(t *testing.T) { + class := &models.Class{ + Class: "Foo", + Properties: []*models.Property{{ + Name: "Foo", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "prop overwritten by user", + }, + }, + }}, + Vectorizer: "my-module", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-class-prop-1": "overwritten by user", + }, + }, + VectorIndexConfig: hnsw.NewDefaultUserConfig(), + VectorConfig: map[string]models.VectorConfig{ + "vec1": { + Vectorizer: map[string]interface{}{ + "my-module-2": map[string]interface{}{}, + }, + }, + }, + } + prop := &models.Property{ + DataType: []string{"boolean"}, + ModuleConfig: map[string]interface{}{ + "my-module": 
map[string]interface{}{ + "per-prop-1": "overwritten by user", + }, + }, + Name: "newProp", + } + expected := &models.Property{ + DataType: []string{"boolean"}, + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "per-prop-1": "overwritten by user", + "per-prop-2": "prop default value", + }, + "my-module-2": map[string]interface{}{ + "per-prop-1": "prop default value", + "per-prop-2": "prop default value", + }, + }, + Name: "newProp", + } + + logger, _ := test.NewNullLogger() + p := NewProvider(logger, config.Config{}) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module", + }, + }) + p.Register(&dummyModuleClassConfigurator{ + dummyText2VecModuleNoCapabilities: dummyText2VecModuleNoCapabilities{ + name: "my-module-2", + }, + }) + p.SetSinglePropertyDefaults(class, prop) + + assert.Equal(t, expected, prop, + "user specified module config is used, for rest the default value is used") +} + +type dummyModuleClassConfigurator struct { + dummyText2VecModuleNoCapabilities + validateError error +} + +func (d *dummyModuleClassConfigurator) ClassConfigDefaults() map[string]interface{} { + return map[string]interface{}{ + "per-class-prop-1": "some default value", + "per-class-prop-2": "some default value", + } +} + +func (d *dummyModuleClassConfigurator) PropertyConfigDefaults( + dt *schema.DataType, +) map[string]interface{} { + return map[string]interface{}{ + "per-prop-1": "prop default value", + "per-prop-2": "prop default value", + } +} + +func (d *dummyModuleClassConfigurator) ValidateClass(ctx context.Context, + class *models.Class, cfg moduletools.ClassConfig, +) error { + return d.validateError +} + +var _ = modulecapabilities.ClassConfigurator( + &dummyModuleClassConfigurator{}) diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_test.go new file mode 
100644 index 0000000000000000000000000000000000000000..67f6f41a1b8f528666ddcad31c3907fa6a4a8399 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/module_config_test.go @@ -0,0 +1,93 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/entities/models" +) + +func TestClassBasedModuleConfig(t *testing.T) { + t.Run("when the prop doesn't exist", func(t *testing.T) { + class := &models.Class{ + Class: "Test", + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + assert.Equal(t, map[string]interface{}{}, cfg.Property("some-prop")) + }) + + t.Run("without any module-specific config", func(t *testing.T) { + class := &models.Class{ + Class: "Test", + Properties: []*models.Property{ + { + Name: "some-prop", + }, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + assert.Equal(t, map[string]interface{}{}, cfg.Class()) + assert.Equal(t, map[string]interface{}{}, cfg.Property("some-prop")) + }) + + t.Run("with config for other modules set", func(t *testing.T) { + class := &models.Class{ + Class: "Test", + ModuleConfig: map[string]interface{}{ + "other-module": map[string]interface{}{ + "classLevel": "foo", + }, + }, + Properties: []*models.Property{ + { + Name: "some-prop", + ModuleConfig: map[string]interface{}{ + "other-module": map[string]interface{}{ + "propLevel": "bar", + }, + }, + }, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + assert.Equal(t, map[string]interface{}{}, cfg.Class()) + assert.Equal(t, map[string]interface{}{}, + cfg.Property("some-prop")) + }) + + t.Run("with all config set", func(t 
*testing.T) { + class := &models.Class{ + Class: "Test", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "classLevel": "foo", + }, + }, + Properties: []*models.Property{ + { + Name: "some-prop", + ModuleConfig: map[string]interface{}{ + "my-module": map[string]interface{}{ + "propLevel": "bar", + }, + }, + }, + }, + } + cfg := NewClassBasedModuleConfig(class, "my-module", "tenant", "", nil) + assert.Equal(t, map[string]interface{}{"classLevel": "foo"}, cfg.Class()) + assert.Equal(t, map[string]interface{}{"propLevel": "bar"}, + cfg.Property("some-prop")) + }) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/modules.go b/platform/dbops/binaries/weaviate-src/usecases/modules/modules.go new file mode 100644 index 0000000000000000000000000000000000000000..d3e919aeafbfea8d6a40ba9b7c88803a544decde --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/modules.go @@ -0,0 +1,1151 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "fmt" + "regexp" + "slices" + "sync" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modelsext" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modulecomponents" +) + +var ( + internalSearchers = []string{ + "nearObject", "nearVector", "where", "group", "limit", "offset", + "after", "groupBy", "bm25", "hybrid", + } + internalAdditionalProperties = []string{"classification", "certainty", "id", "distance", "group"} +) + +type Provider struct { + vectorsLock sync.RWMutex + registered map[string]modulecapabilities.Module + altNames map[string]string + schemaGetter schemaGetter + hasMultipleVectorizers bool + targetVectorNameValidator *regexp.Regexp + logger logrus.FieldLogger + cfg config.Config +} + +type schemaGetter interface { + ReadOnlyClass(name string) *models.Class +} + +func NewProvider(logger logrus.FieldLogger, cfg config.Config) *Provider { + return &Provider{ + registered: map[string]modulecapabilities.Module{}, + altNames: map[string]string{}, + targetVectorNameValidator: regexp.MustCompile(`^` + schema.TargetVectorNameRegex + `$`), + logger: logger, + cfg: cfg, + } +} + +func (p *Provider) Register(mod modulecapabilities.Module) { + p.registered[mod.Name()] = mod + if modHasAltNames, ok := mod.(modulecapabilities.ModuleHasAltNames); ok { + for _, altName := range modHasAltNames.AltNames() { + p.altNames[altName] = mod.Name() + } + } +} + +func (p *Provider) GetByName(name string) 
modulecapabilities.Module { + if mod, ok := p.registered[name]; ok { + return mod + } + if origName, ok := p.altNames[name]; ok { + return p.registered[origName] + } + return nil +} + +func (p *Provider) GetAll() []modulecapabilities.Module { + out := make([]modulecapabilities.Module, len(p.registered)) + i := 0 + for _, mod := range p.registered { + out[i] = mod + i++ + } + + return out +} + +func (p *Provider) GetAllWithHTTPHandlers() []modulecapabilities.ModuleWithHTTPHandlers { + out := make([]modulecapabilities.ModuleWithHTTPHandlers, 0) + for _, mod := range p.registered { + if modWithHTTPHandlers, ok := mod.(modulecapabilities.ModuleWithHTTPHandlers); ok { + out = append(out, modWithHTTPHandlers) + } + } + + return out +} + +func (p *Provider) GetAllExclude(module string) []modulecapabilities.Module { + filtered := []modulecapabilities.Module{} + for _, mod := range p.GetAll() { + if mod.Name() != module { + filtered = append(filtered, mod) + } + } + return filtered +} + +func (p *Provider) Close() error { + for _, mod := range p.registered { + if m, ok := mod.(modulecapabilities.ModuleWithClose); ok { + if err := m.Close(); err != nil { + return err + } + } + } + + return nil +} + +func (p *Provider) SetSchemaGetter(sg schemaGetter) { + p.schemaGetter = sg +} + +func (p *Provider) Init(ctx context.Context, + params moduletools.ModuleInitParams, logger logrus.FieldLogger, +) error { + for i, mod := range p.GetAll() { + if err := mod.Init(ctx, params); err != nil { + return errors.Wrapf(err, "init module %d (%q)", i, mod.Name()) + } else { + logger.WithField("action", "startup"). + WithField("module", mod.Name()). 
+ Debug("initialized module") + } + } + for i, mod := range p.GetAll() { + if modExtension, ok := mod.(modulecapabilities.ModuleExtension); ok { + if err := modExtension.InitExtension(p.GetAllExclude(mod.Name())); err != nil { + return errors.Wrapf(err, "init module extension %d (%q)", i, mod.Name()) + } else { + logger.WithField("action", "startup"). + WithField("module", mod.Name()). + Debug("initialized module extension") + } + } + } + for i, mod := range p.GetAll() { + if modDependency, ok := mod.(modulecapabilities.ModuleDependency); ok { + if err := modDependency.InitDependency(p.GetAllExclude(mod.Name())); err != nil { + return errors.Wrapf(err, "init module dependency %d (%q)", i, mod.Name()) + } else { + logger.WithField("action", "startup"). + WithField("module", mod.Name()). + Debug("initialized module dependency") + } + } + } + if err := p.validate(); err != nil { + return errors.Wrap(err, "validate modules") + } + if p.HasMultipleVectorizers() { + logger.Warn("Multiple vector spaces are present, GraphQL Explore and REST API list objects endpoint module include params has been disabled as a result.") + } + return nil +} + +func (p *Provider) validate() error { + searchers := map[string][]string{} + additionalGraphQLProps := map[string][]string{} + additionalRestAPIProps := map[string][]string{} + for _, mod := range p.GetAll() { + if module, ok := mod.(modulecapabilities.GraphQLArguments); ok { + allArguments := []string{} + for paraName, argument := range module.Arguments() { + if argument.ExtractFunction != nil { + allArguments = append(allArguments, paraName) + } + } + searchers = p.scanProperties(searchers, allArguments, mod.Name()) + } + if module, ok := mod.(modulecapabilities.AdditionalProperties); ok { + allAdditionalRestAPIProps, allAdditionalGrapQLProps := p.getAdditionalProps(module.AdditionalProperties()) + additionalGraphQLProps = p.scanProperties(additionalGraphQLProps, + allAdditionalGrapQLProps, mod.Name()) + additionalRestAPIProps = 
p.scanProperties(additionalRestAPIProps, + allAdditionalRestAPIProps, mod.Name()) + } + } + + var errorMessages []string + errorMessages = append(errorMessages, + p.validateModules("searcher", searchers, internalSearchers)...) + errorMessages = append(errorMessages, + p.validateModules("graphql additional property", additionalGraphQLProps, internalAdditionalProperties)...) + errorMessages = append(errorMessages, + p.validateModules("rest api additional property", additionalRestAPIProps, internalAdditionalProperties)...) + if len(errorMessages) > 0 { + return errors.Errorf("%v", errorMessages) + } + + return nil +} + +func (p *Provider) scanProperties(result map[string][]string, properties []string, module string) map[string][]string { + for i := range properties { + if result[properties[i]] == nil { + result[properties[i]] = []string{} + } + modules := result[properties[i]] + modules = append(modules, module) + result[properties[i]] = modules + } + return result +} + +func (p *Provider) getAdditionalProps(additionalProps map[string]modulecapabilities.AdditionalProperty) ([]string, []string) { + restProps := []string{} + graphQLProps := []string{} + + for _, additionalProperty := range additionalProps { + if additionalProperty.RestNames != nil { + restProps = append(restProps, additionalProperty.RestNames...) + } + if additionalProperty.GraphQLNames != nil { + graphQLProps = append(graphQLProps, additionalProperty.GraphQLNames...) 
+ } + } + return restProps, graphQLProps +} + +func (p *Provider) validateModules(name string, properties map[string][]string, internalProperties []string) []string { + errorMessages := []string{} + for propertyName, modules := range properties { + for i := range internalProperties { + if internalProperties[i] == propertyName { + errorMessages = append(errorMessages, + fmt.Sprintf("%s: %s conflicts with weaviate's internal searcher in modules: %v", + name, propertyName, modules)) + } + } + if len(modules) > 1 { + p.hasMultipleVectorizers = true + } + for _, moduleName := range modules { + moduleType := p.GetByName(moduleName).Type() + if p.moduleProvidesMultipleVectorizers(moduleType) { + p.hasMultipleVectorizers = true + } + } + } + return errorMessages +} + +func (p *Provider) moduleProvidesMultipleVectorizers(moduleType modulecapabilities.ModuleType) bool { + return moduleType == modulecapabilities.Text2ManyVec +} + +func (p *Provider) isOnlyOneModuleEnabledOfAGivenType(moduleType modulecapabilities.ModuleType) bool { + i := 0 + for _, mod := range p.registered { + if mod.Type() == moduleType { + i++ + } + } + return i == 1 +} + +func (p *Provider) IsGenerative(modName string) bool { + mod := p.GetByName(modName) + if mod == nil { + return false + } + return mod.Type() == modulecapabilities.Text2TextGenerative +} + +func (p *Provider) IsReranker(modName string) bool { + mod := p.GetByName(modName) + if mod == nil { + return false + } + return mod.Type() == modulecapabilities.Text2TextReranker +} + +func (p *Provider) IsMultiVector(modName string) bool { + mod := p.GetByName(modName) + if mod == nil { + return false + } + return mod.Type() == modulecapabilities.Text2Multivec || mod.Type() == modulecapabilities.Multi2Multivec +} + +func (p *Provider) isVectorizerModule(moduleType modulecapabilities.ModuleType) bool { + switch moduleType { + case modulecapabilities.Text2Vec, + modulecapabilities.Img2Vec, + modulecapabilities.Multi2Vec, + 
modulecapabilities.Text2ManyVec, + modulecapabilities.Ref2Vec, + modulecapabilities.Text2Multivec, + modulecapabilities.Multi2Multivec: + return true + default: + return false + } +} + +func (p *Provider) isGenerativeModule(moduleType modulecapabilities.ModuleType) bool { + return moduleType == modulecapabilities.Text2TextGenerative +} + +func (p *Provider) shouldIncludeClassArgument(class *models.Class, module string, + moduleType modulecapabilities.ModuleType, altNames []string, +) bool { + if p.isVectorizerModule(moduleType) { + for _, vectorConfig := range class.VectorConfig { + if vectorizer, ok := vectorConfig.Vectorizer.(map[string]interface{}); ok { + if _, ok := vectorizer[module]; ok { + return true + } else if len(altNames) > 0 { + for _, altName := range altNames { + if _, ok := vectorizer[altName]; ok { + return true + } + } + } + } + } + for _, altName := range altNames { + if class.Vectorizer == altName { + return true + } + } + return class.Vectorizer == module + } + if moduleConfig, ok := class.ModuleConfig.(map[string]interface{}); ok { + if _, ok := moduleConfig[module]; ok { + return true + } else if len(altNames) > 0 { + for _, altName := range altNames { + if _, ok := moduleConfig[altName]; ok { + return true + } + } + } + } + // Allow Text2Text (QnA, Generative, Summarize, NER) modules to be registered to a given class + // only if there's no configuration present and there's only one module of a given type enabled + return p.isOnlyOneModuleEnabledOfAGivenType(moduleType) +} + +func (p *Provider) shouldCrossClassIncludeClassArgument(class *models.Class, module string, + moduleType modulecapabilities.ModuleType, altNames []string, +) bool { + if class == nil { + return !p.HasMultipleVectorizers() + } + return p.shouldIncludeClassArgument(class, module, moduleType, altNames) +} + +func (p *Provider) shouldIncludeArgument(schema *models.Schema, module string, + moduleType modulecapabilities.ModuleType, altNames []string, +) bool { + for _, c := 
range schema.Classes { + if p.shouldIncludeClassArgument(c, module, moduleType, altNames) { + return true + } + } + return false +} + +func (p *Provider) shouldAddGenericArgument(class *models.Class, moduleType modulecapabilities.ModuleType) bool { + if p.isGenerativeModule(moduleType) { + return true + } + return p.hasMultipleVectorizersConfig(class) && p.isVectorizerModule(moduleType) +} + +func (p *Provider) hasMultipleVectorizersConfig(class *models.Class) bool { + return len(class.VectorConfig) > 0 +} + +func (p *Provider) shouldCrossClassAddGenericArgument(schema *models.Schema, moduleType modulecapabilities.ModuleType) bool { + for _, c := range schema.Classes { + if p.shouldAddGenericArgument(c, moduleType) { + return true + } + } + return false +} + +func (p *Provider) getGenericArgument(name, className string, + argumentType modulecomponents.ArgumentType, +) *graphql.ArgumentConfig { + var nearTextTransformer modulecapabilities.TextTransform + if name == "nearText" { + // nearText argument might be exposed with an extension, we need to check + // if text transformers module is enabled if so then we need to init nearText + // argument with this extension + for _, mod := range p.GetAll() { + if arg, ok := mod.(modulecapabilities.TextTransformers); ok { + if arg != nil && arg.TextTransformers() != nil { + nearTextTransformer = arg.TextTransformers()["nearText"] + break + } + } + } + } + return modulecomponents.GetGenericArgument(name, className, argumentType, nearTextTransformer) +} + +func (p *Provider) getGenericAdditionalProperty(name string, class *models.Class) *modulecapabilities.AdditionalProperty { + if p.hasMultipleVectorizersConfig(class) { + return modulecomponents.GetGenericAdditionalProperty(name, class.Class) + } + return nil +} + +// GetArguments provides GraphQL Get arguments +func (p *Provider) GetArguments(class *models.Class) map[string]*graphql.ArgumentConfig { + arguments := map[string]*graphql.ArgumentConfig{} + for _, module := range 
p.GetAll() { + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.GraphQLArguments); ok { + for name, argument := range arg.Arguments() { + if argument.GetArgumentsFunction != nil { + if p.shouldAddGenericArgument(class, module.Type()) { + if _, ok := arguments[name]; !ok { + arguments[name] = p.getGenericArgument(name, class.Class, modulecomponents.Get) + } + } else { + arguments[name] = argument.GetArgumentsFunction(class.Class) + } + } + } + } + } + } + + return arguments +} + +func (p *Provider) getModuleAltNames(module modulecapabilities.Module) []string { + if moduleWithAltNames, ok := module.(modulecapabilities.ModuleHasAltNames); ok { + return moduleWithAltNames.AltNames() + } + return nil +} + +func (p *Provider) isModuleNameEqual(module modulecapabilities.Module, targetModule string) bool { + if module.Name() == targetModule { + return true + } + if altNames := p.getModuleAltNames(module); len(altNames) > 0 { + if slices.Contains(altNames, targetModule) { + return true + } + } + return false +} + +// AggregateArguments provides GraphQL Aggregate arguments +func (p *Provider) AggregateArguments(class *models.Class) map[string]*graphql.ArgumentConfig { + arguments := map[string]*graphql.ArgumentConfig{} + for _, module := range p.GetAll() { + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.GraphQLArguments); ok { + for name, argument := range arg.Arguments() { + if argument.AggregateArgumentsFunction != nil { + if p.shouldAddGenericArgument(class, module.Type()) { + if _, ok := arguments[name]; !ok { + arguments[name] = p.getGenericArgument(name, class.Class, modulecomponents.Aggregate) + } + } else { + arguments[name] = argument.AggregateArgumentsFunction(class.Class) + } + } + } + } + } + } + return arguments +} + +// ExploreArguments provides GraphQL Explore arguments 
+func (p *Provider) ExploreArguments(schema *models.Schema) map[string]*graphql.ArgumentConfig { + arguments := map[string]*graphql.ArgumentConfig{} + for _, module := range p.GetAll() { + if p.shouldIncludeArgument(schema, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.GraphQLArguments); ok { + for name, argument := range arg.Arguments() { + if argument.ExploreArgumentsFunction != nil { + if p.shouldCrossClassAddGenericArgument(schema, module.Type()) { + if _, ok := arguments[name]; !ok { + arguments[name] = p.getGenericArgument(name, "", modulecomponents.Explore) + } + } else { + arguments[name] = argument.ExploreArgumentsFunction() + } + } + } + } + } + } + return arguments +} + +// CrossClassExtractSearchParams extracts GraphQL arguments from modules without +// being specific to any one class and it's configuration. This is used in +// Explore() { } for example +func (p *Provider) CrossClassExtractSearchParams(arguments map[string]interface{}) map[string]interface{} { + // explore does not support target vectors + params, _ := p.extractSearchParams(arguments, nil) + return params +} + +// ExtractSearchParams extracts GraphQL arguments +func (p *Provider) ExtractSearchParams(arguments map[string]interface{}, className string) (map[string]interface{}, map[string]*dto.TargetCombination) { + class, err := p.getClass(className) + if err != nil { + return map[string]interface{}{}, nil + } + return p.extractSearchParams(arguments, class) +} + +func (p *Provider) extractSearchParams(arguments map[string]interface{}, class *models.Class) (map[string]interface{}, map[string]*dto.TargetCombination) { + exractedParams := map[string]interface{}{} + exractedCombination := map[string]*dto.TargetCombination{} + for _, module := range p.GetAll() { + if p.shouldCrossClassIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if args, ok := module.(modulecapabilities.GraphQLArguments); 
ok { + for paramName, argument := range args.Arguments() { + if param, ok := arguments[paramName]; ok && argument.ExtractFunction != nil { + extracted, combination, err := argument.ExtractFunction(param.(map[string]interface{})) + if err != nil { + continue + } + exractedParams[paramName] = extracted + exractedCombination[paramName] = combination + } + } + } + } + } + return exractedParams, exractedCombination +} + +// CrossClassValidateSearchParam validates module parameters without +// being specific to any one class and it's configuration. This is used in +// Explore() { } for example +func (p *Provider) CrossClassValidateSearchParam(name string, value interface{}) error { + return p.validateSearchParam(name, value, nil) +} + +// ValidateSearchParam validates module parameters +func (p *Provider) ValidateSearchParam(name string, value interface{}, className string) error { + class, err := p.getClass(className) + if err != nil { + return err + } + + return p.validateSearchParam(name, value, class) +} + +func (p *Provider) validateSearchParam(name string, value interface{}, class *models.Class) error { + for _, module := range p.GetAll() { + if p.shouldCrossClassIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if args, ok := module.(modulecapabilities.GraphQLArguments); ok { + for paramName, argument := range args.Arguments() { + if paramName == name && argument.ValidateFunction != nil { + return argument.ValidateFunction(value) + } + } + } + } + } + + return fmt.Errorf("could not vectorize input for collection %v with search-type %v. 
Make sure a vectorizer module is configured for this collection", class.Class, name) +} + +// GetAdditionalFields provides GraphQL Get additional fields +func (p *Provider) GetAdditionalFields(class *models.Class) map[string]*graphql.Field { + additionalProperties := map[string]*graphql.Field{} + additionalGenerativeDefaultProvider := "" + additionalGenerativeParameters := map[string]modulecapabilities.GenerativeProperty{} + for _, module := range p.GetAll() { + if p.isGenerativeModule(module.Type()) { + if arg, ok := module.(modulecapabilities.AdditionalGenerativeProperties); ok { + for name, additionalGenerativeParameter := range arg.AdditionalGenerativeProperties() { + additionalGenerativeParameters[name] = additionalGenerativeParameter + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + additionalGenerativeDefaultProvider = name + } + } + } + } else if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.AdditionalProperties); ok { + for name, additionalProperty := range arg.AdditionalProperties() { + if additionalProperty.GraphQLFieldFunction != nil { + if genericAdditionalProperty := p.getGenericAdditionalProperty(name, class); genericAdditionalProperty != nil { + if genericAdditionalProperty.GraphQLFieldFunction != nil { + if _, ok := additionalProperties[name]; !ok { + additionalProperties[name] = genericAdditionalProperty.GraphQLFieldFunction(class.Class) + } + } + } else { + additionalProperties[name] = additionalProperty.GraphQLFieldFunction(class.Class) + } + } + } + } + } + } + if len(additionalGenerativeParameters) > 0 { + if generateFn := modulecomponents.GetGenericGenerateProperty(class.Class, additionalGenerativeParameters, additionalGenerativeDefaultProvider, p.logger); generateFn != nil { + additionalProperties[modulecomponents.AdditionalPropertyGenerate] = generateFn.GraphQLFieldFunction(class.Class) + } + } 
+ return additionalProperties +} + +// ExtractAdditionalField extracts additional properties from given graphql arguments +func (p *Provider) ExtractAdditionalField(className, name string, params []*ast.Argument) interface{} { + class, err := p.getClass(className) + if err != nil { + return err + } + additionalGenerativeDefaultProvider := "" + additionalGenerativeParameters := map[string]modulecapabilities.GenerativeProperty{} + for _, module := range p.GetAll() { + if name == modulecomponents.AdditionalPropertyGenerate { + if p.isGenerativeModule(module.Type()) { + if arg, ok := module.(modulecapabilities.AdditionalGenerativeProperties); ok { + for name, additionalGenerativeParameter := range arg.AdditionalGenerativeProperties() { + additionalGenerativeParameters[name] = additionalGenerativeParameter + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + additionalGenerativeDefaultProvider = name + } + } + } + } + } else if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.AdditionalProperties); ok { + if additionalProperties := arg.AdditionalProperties(); len(additionalProperties) > 0 { + if additionalProperty, ok := additionalProperties[name]; ok { + return additionalProperty.GraphQLExtractFunction(params, class) + } + } + } + } + } + if name == modulecomponents.AdditionalPropertyGenerate { + if generateFn := modulecomponents.GetGenericGenerateProperty(class.Class, additionalGenerativeParameters, additionalGenerativeDefaultProvider, p.logger); generateFn != nil { + return generateFn.GraphQLExtractFunction(params, class) + } + } + return nil +} + +// GetObjectAdditionalExtend extends rest api get queries with additional properties +func (p *Provider) GetObjectAdditionalExtend(ctx context.Context, + in *search.Result, moduleParams map[string]interface{}, +) (*search.Result, error) { + resArray, err := p.additionalExtend(ctx, 
search.Results{*in}, moduleParams, nil, "ObjectGet", nil) + if err != nil { + return nil, err + } + return &resArray[0], nil +} + +// ListObjectsAdditionalExtend extends rest api list queries with additional properties +func (p *Provider) ListObjectsAdditionalExtend(ctx context.Context, + in search.Results, moduleParams map[string]interface{}, +) (search.Results, error) { + return p.additionalExtend(ctx, in, moduleParams, nil, "ObjectList", nil) +} + +// GetExploreAdditionalExtend extends graphql api get queries with additional properties +func (p *Provider) GetExploreAdditionalExtend(ctx context.Context, in []search.Result, + moduleParams map[string]interface{}, searchVector models.Vector, + argumentModuleParams map[string]interface{}, +) ([]search.Result, error) { + return p.additionalExtend(ctx, in, moduleParams, searchVector, "ExploreGet", argumentModuleParams) +} + +// ListExploreAdditionalExtend extends graphql api list queries with additional properties +func (p *Provider) ListExploreAdditionalExtend(ctx context.Context, in []search.Result, moduleParams map[string]interface{}, argumentModuleParams map[string]interface{}) ([]search.Result, error) { + return p.additionalExtend(ctx, in, moduleParams, nil, "ExploreList", argumentModuleParams) +} + +func (p *Provider) additionalExtend(ctx context.Context, in []search.Result, moduleParams map[string]interface{}, searchVector models.Vector, capability string, argumentModuleParams map[string]interface{}) ([]search.Result, error) { + toBeExtended := in + if len(toBeExtended) > 0 { + class, err := p.getClassFromSearchResult(toBeExtended) + if err != nil { + return nil, err + } + + additionalGenerativeDefaultProvider := "" + additionalGenerativeParameters := map[string]modulecapabilities.GenerativeProperty{} + allAdditionalProperties := map[string]modulecapabilities.AdditionalProperty{} + for _, module := range p.GetAll() { + if p.isGenerativeModule(module.Type()) { + if arg, ok := 
module.(modulecapabilities.AdditionalGenerativeProperties); ok { + for name, additionalGenerativeParameter := range arg.AdditionalGenerativeProperties() { + additionalGenerativeParameters[name] = additionalGenerativeParameter + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + additionalGenerativeDefaultProvider = name + } + } + } + } else if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.AdditionalProperties); ok { + if arg != nil && arg.AdditionalProperties() != nil { + for name, additionalProperty := range arg.AdditionalProperties() { + allAdditionalProperties[name] = additionalProperty + } + } + } + } + } + if len(additionalGenerativeParameters) > 0 { + if generateFn := modulecomponents.GetGenericGenerateProperty(class.Class, additionalGenerativeParameters, additionalGenerativeDefaultProvider, p.logger); generateFn != nil { + allAdditionalProperties[modulecomponents.AdditionalPropertyGenerate] = *generateFn + } + } + + if len(allAdditionalProperties) > 0 { + if err := p.checkCapabilities(allAdditionalProperties, moduleParams, capability); err != nil { + return nil, err + } + cfg := NewClassBasedModuleConfig(class, "", "", "", &p.cfg) + for name, value := range moduleParams { + additionalPropertyFn := p.getAdditionalPropertyFn(allAdditionalProperties[name], capability) + if additionalPropertyFn != nil && value != nil { + searchValue := value + if searchVectorValue, ok := value.(modulecapabilities.AdditionalPropertyWithSearchVector[[]float32]); ok { + if vec, ok := searchVector.([]float32); ok { + searchVectorValue.SetSearchVector(vec) + searchValue = searchVectorValue + } else { + return nil, errors.Errorf("extend %s: set search vector unrecongnized type: %T", name, searchVector) + } + } else if searchVectorValue, ok := value.(modulecapabilities.AdditionalPropertyWithSearchVector[[][]float32]); ok { + if vec, ok := 
searchVector.([][]float32); ok { + searchVectorValue.SetSearchVector(vec) + searchValue = searchVectorValue + } else { + return nil, errors.Errorf("extend %s: set search multi vector unrecongnized type: %T", name, searchVector) + } + } + resArray, err := additionalPropertyFn(ctx, toBeExtended, searchValue, nil, argumentModuleParams, cfg) + if err != nil { + return nil, errors.Errorf("extend %s: %v", name, err) + } + toBeExtended = resArray + } else { + return nil, errors.Errorf("unknown capability: %s", name) + } + } + } + } + return toBeExtended, nil +} + +func (p *Provider) getClassFromSearchResult(in []search.Result) (*models.Class, error) { + if len(in) > 0 { + return p.getClass(in[0].ClassName) + } + return nil, errors.Errorf("unknown class") +} + +func (p *Provider) checkCapabilities(additionalProperties map[string]modulecapabilities.AdditionalProperty, + moduleParams map[string]interface{}, capability string, +) error { + for name := range moduleParams { + additionalPropertyFn := p.getAdditionalPropertyFn(additionalProperties[name], capability) + if additionalPropertyFn == nil { + return errors.Errorf("unknown capability: %s", name) + } + } + return nil +} + +func (p *Provider) getAdditionalPropertyFn( + additionalProperty modulecapabilities.AdditionalProperty, + capability string, +) modulecapabilities.AdditionalPropertyFn { + switch capability { + case "ObjectGet": + return additionalProperty.SearchFunctions.ObjectGet + case "ObjectList": + return additionalProperty.SearchFunctions.ObjectList + case "ExploreGet": + return additionalProperty.SearchFunctions.ExploreGet + case "ExploreList": + return additionalProperty.SearchFunctions.ExploreList + default: + return nil + } +} + +// GraphQLAdditionalFieldNames get's all additional field names used in graphql +func (p *Provider) GraphQLAdditionalFieldNames() []string { + additionalPropertiesNames := map[string]struct{}{} + for _, module := range p.GetAll() { + if arg, ok := 
module.(modulecapabilities.AdditionalProperties); ok { + for _, additionalProperty := range arg.AdditionalProperties() { + for _, gqlName := range additionalProperty.GraphQLNames { + additionalPropertiesNames[gqlName] = struct{}{} + } + } + } else if _, ok := module.(modulecapabilities.AdditionalGenerativeProperties); ok { + additionalPropertiesNames[modulecomponents.AdditionalPropertyGenerate] = struct{}{} + } + } + var availableAdditionalPropertiesNames []string + for gqlName := range additionalPropertiesNames { + availableAdditionalPropertiesNames = append(availableAdditionalPropertiesNames, gqlName) + } + return availableAdditionalPropertiesNames +} + +// RestApiAdditionalProperties get's all rest specific additional properties with their +// default values +func (p *Provider) RestApiAdditionalProperties(includeProp string, class *models.Class) map[string]interface{} { + moduleParams := map[string]interface{}{} + for _, module := range p.GetAll() { + if p.shouldCrossClassIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if arg, ok := module.(modulecapabilities.AdditionalProperties); ok { + for name, additionalProperty := range arg.AdditionalProperties() { + for _, includePropName := range additionalProperty.RestNames { + if includePropName == includeProp && moduleParams[name] == nil { + moduleParams[name] = additionalProperty.DefaultValue + } + } + } + } + } + } + return moduleParams +} + +func (p *Provider) TargetsFromSearchParam(className string, params interface{}) ([]string, error) { + class, err := p.getClass(className) + if err != nil { + return nil, err + } + targetVectors, err := p.getTargetVector(class, params) + if err != nil { + return nil, err + } + + return targetVectors, nil +} + +func (p *Provider) IsTargetVectorMultiVector(className, targetVector string) (bool, error) { + class, err := p.getClass(className) + if err != nil { + return false, err + } + targetModule := p.getModuleNameForTargetVector(class, 
targetVector) + return p.IsMultiVector(targetModule), nil +} + +// VectorFromSearchParam gets a vector for a given argument. This is used in +// Get { Class() } for example +func (p *Provider) VectorFromSearchParam(ctx context.Context, className, targetVector, tenant, param string, params interface{}, + findVectorFn modulecapabilities.FindVectorFn[[]float32], +) ([]float32, error) { + class, err := p.getClass(className) + if err != nil { + return nil, err + } + + targetModule := p.getModuleNameForTargetVector(class, targetVector) + + for _, mod := range p.GetAll() { + if found, vector, err := vectorFromSearchParam(ctx, class, mod, targetModule, targetVector, tenant, param, params, findVectorFn, p.isModuleNameEqual, &p.cfg); found { + return vector, err + } + } + + return nil, fmt.Errorf("could not vectorize input for collection %v with search-type %v, targetVector %v and parameters %v. Make sure a vectorizer module is configured for this class", className, param, targetVector, params) +} + +// MultiVectorFromSearchParam gets a multi vector for a given argument. This is used in +// Get { Class() } for example +func (p *Provider) MultiVectorFromSearchParam(ctx context.Context, className, targetVector, tenant, param string, params interface{}, + findVectorFn modulecapabilities.FindVectorFn[[][]float32], +) ([][]float32, error) { + class, err := p.getClass(className) + if err != nil { + return nil, err + } + + targetModule := p.getModuleNameForTargetVector(class, targetVector) + + for _, mod := range p.GetAll() { + if found, vector, err := vectorFromSearchParam(ctx, class, mod, targetModule, targetVector, tenant, param, params, findVectorFn, p.isModuleNameEqual, &p.cfg); found { + return vector, err + } + } + + return nil, fmt.Errorf("could not vectorize input for collection %v with search-type %v, targetVector %v and parameters %v. 
Make sure a vectorizer module is configured for this class", className, param, targetVector, params) +} + +// CrossClassVectorFromSearchParam gets a vector for a given argument without +// being specific to any one class and it's configuration. This is used in +// Explore() { } for example +func (p *Provider) CrossClassVectorFromSearchParam(ctx context.Context, + param string, params interface{}, + findVectorFn modulecapabilities.FindVectorFn[[]float32], +) ([]float32, string, error) { + for _, mod := range p.GetAll() { + if found, vector, targetVector, err := crossClassVectorFromSearchParam(ctx, mod, param, params, findVectorFn, p.getTargetVector, p.cfg); found { + return vector, targetVector, err + } + } + + return nil, "", fmt.Errorf("could not vectorize input for Explore with search-type %v and parameters %v. Make sure a vectorizer module is configured", param, params) +} + +// MultiCrossClassVectorFromSearchParam gets a multi vector for a given argument without +// being specific to any one class and it's configuration. This is used in +// Explore() { } for example +func (p *Provider) MultiCrossClassVectorFromSearchParam(ctx context.Context, + param string, params interface{}, + findVectorFn modulecapabilities.FindVectorFn[[][]float32], +) ([][]float32, string, error) { + for _, mod := range p.GetAll() { + if found, vector, targetVector, err := crossClassVectorFromSearchParam(ctx, mod, param, params, findVectorFn, p.getTargetVector, p.cfg); found { + return vector, targetVector, err + } + } + + return nil, "", fmt.Errorf("could not vectorize input for Explore with search-type %v and parameters %v. 
Make sure a vectorizer module is configured", param, params) +} + +func (p *Provider) getTargetVector(class *models.Class, params interface{}) ([]string, error) { + if nearParam, ok := params.(modulecapabilities.NearParam); ok && len(nearParam.GetTargetVectors()) >= 1 { + return nearParam.GetTargetVectors(), nil + } + if class != nil { + if modelsext.ClassHasLegacyVectorIndex(class) { + return []string{""}, nil + } + + if len(class.VectorConfig) > 1 { + return nil, fmt.Errorf("multiple vectorizers configuration found, please specify target vector name") + } + + if len(class.VectorConfig) == 1 { + for name := range class.VectorConfig { + return []string{name}, nil + } + } + } + return []string{""}, nil +} + +func (p *Provider) getModuleNameForTargetVector(class *models.Class, targetVector string) string { + if len(class.VectorConfig) > 0 { + if vectorConfig, ok := class.VectorConfig[targetVector]; ok { + if vectorizer, ok := vectorConfig.Vectorizer.(map[string]interface{}); ok && len(vectorizer) == 1 { + for moduleName := range vectorizer { + return moduleName + } + } + } + } + return class.Vectorizer +} + +func (p *Provider) VectorFromInput(ctx context.Context, + className, input, targetVector string, +) ([]float32, error) { + class, err := p.getClass(className) + if err != nil { + return nil, err + } + targetModule := p.getModuleNameForTargetVector(class, targetVector) + + for _, mod := range p.GetAll() { + if p.isModuleNameEqual(mod, targetModule) { + if p.shouldIncludeClassArgument(class, mod.Name(), mod.Type(), p.getModuleAltNames(mod)) { + if found, vector, err := vectorFromInput[[]float32](ctx, mod, class, input, targetVector, &p.cfg); found { + return vector, err + } + } + } + } + + return nil, fmt.Errorf("VectorFromInput was called without vectorizer on class %v for input %v", className, input) +} + +func (p *Provider) MultiVectorFromInput(ctx context.Context, + className, input, targetVector string, +) ([][]float32, error) { + class, err := 
p.getClass(className) + if err != nil { + return nil, err + } + targetModule := p.getModuleNameForTargetVector(class, targetVector) + + for _, mod := range p.GetAll() { + if p.isModuleNameEqual(mod, targetModule) { + if p.shouldIncludeClassArgument(class, mod.Name(), mod.Type(), p.getModuleAltNames(mod)) { + if found, vector, err := vectorFromInput[[][]float32](ctx, mod, class, input, targetVector, &p.cfg); found { + return vector, err + } + } + } + } + + return nil, fmt.Errorf("MultiVectorFromInput was called without vectorizer on class %v for input %v", className, input) +} + +// ParseClassifierSettings parses and adds classifier specific settings +func (p *Provider) ParseClassifierSettings(name string, + params *models.Classification, +) error { + class, err := p.getClass(params.Class) + if err != nil { + return err + } + for _, module := range p.GetAll() { + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if c, ok := module.(modulecapabilities.ClassificationProvider); ok { + for _, classifier := range c.Classifiers() { + if classifier != nil && classifier.Name() == name { + return classifier.ParseClassifierSettings(params) + } + } + } + } + } + return nil +} + +// GetClassificationFn returns given module's classification +func (p *Provider) GetClassificationFn(className, name string, + params modulecapabilities.ClassifyParams, +) (modulecapabilities.ClassifyItemFn, error) { + class, err := p.getClass(className) + if err != nil { + return nil, err + } + for _, module := range p.GetAll() { + if p.shouldIncludeClassArgument(class, module.Name(), module.Type(), p.getModuleAltNames(module)) { + if c, ok := module.(modulecapabilities.ClassificationProvider); ok { + for _, classifier := range c.Classifiers() { + if classifier != nil && classifier.Name() == name { + return classifier.ClassifyFn(params) + } + } + } + } + } + return nil, errors.Errorf("classifier %s not found", name) +} + +// GetMeta returns meta 
information about modules +func (p *Provider) GetMeta() (map[string]interface{}, error) { + metaInfos := map[string]interface{}{} + for _, module := range p.GetAll() { + if c, ok := module.(modulecapabilities.MetaProvider); ok { + meta, err := c.MetaInfo() + if err != nil { + metaInfos[module.Name()] = map[string]interface{}{ + "error": err.Error(), + } + } else { + metaInfos[module.Name()] = meta + } + } + } + return metaInfos, nil +} + +func (p *Provider) getClass(className string) (*models.Class, error) { + class := p.schemaGetter.ReadOnlyClass(className) + if class == nil { + return nil, errors.Errorf("class %q not found in schema", className) + } + return class, nil +} + +func (p *Provider) HasMultipleVectorizers() bool { + return p.hasMultipleVectorizers +} + +func (p *Provider) BackupBackend(backend string) (modulecapabilities.BackupBackend, error) { + module := p.GetByName(backend) + if module != nil { + if module.Type() == modulecapabilities.Backup { + module_backend, ok := module.(modulecapabilities.BackupBackend) + if ok { + return module_backend, nil + } else { + return nil, errors.Errorf("backup: %s is not a backup backend (actual type: %T)", backend, module) + } + } else { + return nil, errors.Errorf("backup: %s is not a backup backend type", backend) + } + } + return nil, errors.Errorf("backup: %s not found", backend) +} + +func (p *Provider) OffloadBackend(backend string) (modulecapabilities.OffloadCloud, bool) { + if module := p.GetByName(backend); module != nil { + if module.Type() == modulecapabilities.Offload { + if backend, ok := module.(modulecapabilities.OffloadCloud); ok { + return backend, true + } + } + } + return nil, false +} + +func (p *Provider) EnabledBackupBackends() []modulecapabilities.BackupBackend { + var backends []modulecapabilities.BackupBackend + for _, mod := range p.GetAll() { + if backend, ok := mod.(modulecapabilities.BackupBackend); ok && + mod.Type() == modulecapabilities.Backup { + backends = append(backends, backend) 
+ } + } + return backends +} + +func (p *Provider) UsageEnabled() bool { + for _, module := range p.GetAll() { + if module.Type() == modulecapabilities.Usage { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/modules_generic.go b/platform/dbops/binaries/weaviate-src/usecases/modules/modules_generic.go new file mode 100644 index 0000000000000000000000000000000000000000..8aadbe56a7f8c3594c036b9862d7fe1a0391306c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/modules_generic.go @@ -0,0 +1,108 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/usecases/config" +) + +func vectorFromSearchParam[T dto.Embedding]( + ctx context.Context, + class *models.Class, + mod modulecapabilities.Module, + targetModule, targetVector, tenant, param string, + params interface{}, + findVectorFn modulecapabilities.FindVectorFn[T], + isModuleNameEqualFn func(module modulecapabilities.Module, targetModule string) bool, + dbConfig *config.Config, +) (bool, T, error) { + var moduleName string + var vectorSearches map[string]modulecapabilities.VectorForParams[T] + + if searcher, ok := mod.(modulecapabilities.Searcher[T]); ok { + if isModuleNameEqualFn(mod, targetModule) { + moduleName = mod.Name() + vectorSearches = searcher.VectorSearches() + } + } else if searchers, ok := mod.(modulecapabilities.DependencySearcher[T]); ok { + if dependencySearchers := searchers.VectorSearches(); dependencySearchers != nil { + moduleName = 
targetModule + vectorSearches = dependencySearchers[targetModule] + } + } + if vectorSearches != nil { + if searchVectorFn := vectorSearches[param]; searchVectorFn != nil { + cfg := NewClassBasedModuleConfig(class, moduleName, tenant, targetVector, dbConfig) + vector, err := searchVectorFn.VectorForParams(ctx, params, class.Class, findVectorFn, cfg) + if err != nil { + return true, nil, errors.Errorf("vectorize params: %v", err) + } + return true, vector, nil + } + } + + return false, nil, nil +} + +func crossClassVectorFromSearchParam[T dto.Embedding]( + ctx context.Context, + mod modulecapabilities.Module, + param string, + params interface{}, + findVectorFn modulecapabilities.FindVectorFn[T], + getTargetVectorFn func(class *models.Class, params interface{}) ([]string, error), + dbConfig config.Config, +) (bool, T, string, error) { + if searcher, ok := mod.(modulecapabilities.Searcher[T]); ok { + if vectorSearches := searcher.VectorSearches(); vectorSearches != nil { + if searchVectorFn := vectorSearches[param]; searchVectorFn != nil { + cfg := NewCrossClassModuleConfig() + vector, err := searchVectorFn.VectorForParams(ctx, params, "", findVectorFn, cfg) + if err != nil { + return true, nil, "", errors.Errorf("vectorize params: %v", err) + } + targetVector, err := getTargetVectorFn(nil, params) + if err != nil { + return true, nil, "", errors.Errorf("get target vector: %v", err) + } + if len(targetVector) > 0 { + return true, vector, targetVector[0], nil + } + return true, vector, "", nil + } + } + } + + return false, nil, "", nil +} + +func vectorFromInput[T dto.Embedding]( + ctx context.Context, + mod modulecapabilities.Module, + class *models.Class, + input, targetVector string, + dbConfig *config.Config, +) (bool, T, error) { + if vectorizer, ok := mod.(modulecapabilities.InputVectorizer[T]); ok { + // does not access any objects, therefore tenant is irrelevant + cfg := NewClassBasedModuleConfig(class, mod.Name(), "", targetVector, dbConfig) + vector, err := 
vectorizer.VectorizeInput(ctx, input, cfg) + return true, vector, err + } + return false, nil, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/modules_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/modules_test.go new file mode 100644 index 0000000000000000000000000000000000000000..923ac37f0fb6dc2d3cafb47d60089358ef9c857c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/modules_test.go @@ -0,0 +1,550 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/tailor-inc/graphql" + + "github.com/weaviate/weaviate/entities/backup" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + enitiesSchema "github.com/weaviate/weaviate/entities/schema" + ubackup "github.com/weaviate/weaviate/usecases/backup" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestModulesProvider(t *testing.T) { + t.Run("should register simple module", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + class := &models.Class{ + Class: "ClassOne", + Vectorizer: "mod1", + } + schema := &models.Schema{ + Classes: []*models.Class{class}, + } + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + params := map[string]interface{}{} + params["nearArgumentSomeParam"] = string("doesn't matter here") + arguments := map[string]interface{}{} + arguments["nearArgument"] = 
params + + // when + modulesProvider.Register(newGraphQLModule("mod1").withArg("nearArgument")) + err := modulesProvider.Init(context.Background(), nil, logger) + registered := modulesProvider.GetAll() + getArgs := modulesProvider.GetArguments(class) + exploreArgs := modulesProvider.ExploreArguments(schema) + extractedArgs, _ := modulesProvider.ExtractSearchParams(arguments, class.Class) + + // then + mod1 := registered[0] + assert.Nil(t, err) + assert.Equal(t, "mod1", mod1.Name()) + assert.NotNil(t, getArgs["nearArgument"]) + assert.NotNil(t, exploreArgs["nearArgument"]) + assert.NotNil(t, extractedArgs["nearArgument"]) + }) + + t.Run("should not register modules providing the same search param", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLModule("mod1").withArg("nearArgument")) + modulesProvider.Register(newGraphQLModule("mod2").withArg("nearArgument")) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.Nil(t, err) + }) + + t.Run("should not register modules providing internal search param", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLModule("mod1").withArg("nearArgument")) + modulesProvider.Register(newGraphQLModule("mod3"). + withExtractFn("limit"). + withExtractFn("where"). + withExtractFn("nearVector"). + withExtractFn("nearObject"). 
+ withExtractFn("group"), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nearObject conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "nearVector conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "where conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "group conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "limit conflicts with weaviate's internal searcher in modules: [mod3]") + }) + + t.Run("should not register modules providing faulty params", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLModule("mod1").withArg("nearArgument")) + modulesProvider.Register(newGraphQLModule("mod2").withArg("nearArgument")) + modulesProvider.Register(newGraphQLModule("mod3"). + withExtractFn("limit"). + withExtractFn("where"). + withExtractFn("nearVector"). + withExtractFn("nearObject"). 
+ withExtractFn("group"), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "nearObject conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "nearVector conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "where conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "group conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "limit conflicts with weaviate's internal searcher in modules: [mod3]") + }) + + t.Run("should register simple additional property module", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + class := &models.Class{ + Class: "ClassOne", + Vectorizer: "mod1", + } + schema := &models.Schema{ + Classes: []*models.Class{class}, + } + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + params := map[string]interface{}{} + params["nearArgumentSomeParam"] = string("doesn't matter here") + arguments := map[string]interface{}{} + arguments["nearArgument"] = params + + // when + modulesProvider.Register(newGraphQLAdditionalModule("mod1"). + withGraphQLArg("featureProjection", []string{"featureProjection"}). + withGraphQLArg("interpretation", []string{"interpretation"}). + withRestApiArg("featureProjection", []string{"featureProjection", "fp", "f-p"}). + withRestApiArg("interpretation", []string{"interpretation"}). 
+ withArg("nearArgument"), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + registered := modulesProvider.GetAll() + getArgs := modulesProvider.GetArguments(class) + exploreArgs := modulesProvider.ExploreArguments(schema) + extractedArgs, _ := modulesProvider.ExtractSearchParams(arguments, class.Class) + restApiFPArgs := modulesProvider.RestApiAdditionalProperties("featureProjection", class) + restApiInterpretationArgs := modulesProvider.RestApiAdditionalProperties("interpretation", class) + graphQLArgs := modulesProvider.GraphQLAdditionalFieldNames() + + // then + mod1 := registered[0] + assert.Nil(t, err) + assert.Equal(t, "mod1", mod1.Name()) + assert.NotNil(t, getArgs["nearArgument"]) + assert.NotNil(t, exploreArgs["nearArgument"]) + assert.NotNil(t, extractedArgs["nearArgument"]) + assert.NotNil(t, restApiFPArgs["featureProjection"]) + assert.NotNil(t, restApiInterpretationArgs["interpretation"]) + assert.Contains(t, graphQLArgs, "featureProjection") + assert.Contains(t, graphQLArgs, "interpretation") + }) + + t.Run("should not register additional property modules providing the same params", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLAdditionalModule("mod1"). + withArg("nearArgument"). + withGraphQLArg("featureProjection", []string{"featureProjection"}). + withRestApiArg("featureProjection", []string{"featureProjection", "fp", "f-p"}), + ) + modulesProvider.Register(newGraphQLAdditionalModule("mod2"). + withArg("nearArgument"). + withGraphQLArg("featureProjection", []string{"featureProjection"}). 
+ withRestApiArg("featureProjection", []string{"featureProjection", "fp", "f-p"}), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.Nil(t, err) + }) + + t.Run("should not register additional property modules providing internal search param", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLAdditionalModule("mod1").withArg("nearArgument")) + modulesProvider.Register(newGraphQLAdditionalModule("mod3"). + withExtractFn("limit"). + withExtractFn("where"). + withExtractFn("nearVector"). + withExtractFn("nearObject"). + withExtractFn("group"). + withExtractFn("groupBy"). + withExtractFn("hybrid"). + withExtractFn("bm25"). + withExtractFn("offset"). + withExtractFn("after"). + withGraphQLArg("group", []string{"group"}). + withGraphQLArg("classification", []string{"classification"}). + withRestApiArg("classification", []string{"classification"}). + withGraphQLArg("certainty", []string{"certainty"}). + withRestApiArg("certainty", []string{"certainty"}). + withGraphQLArg("distance", []string{"distance"}). + withRestApiArg("distance", []string{"distance"}). + withGraphQLArg("id", []string{"id"}). 
+ withRestApiArg("id", []string{"id"}), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "searcher: nearObject conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: nearVector conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: where conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: group conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: groupBy conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: hybrid conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: bm25 conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: limit conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: offset conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: after conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "rest api additional property: classification conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "rest api additional property: certainty conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "rest api additional property: distance conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "rest api additional property: id conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "graphql additional property: classification conflicts with weaviate's internal searcher 
in modules: [mod3]") + assert.Contains(t, err.Error(), "graphql additional property: certainty conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "graphql additional property: distance conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "graphql additional property: id conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "graphql additional property: group conflicts with weaviate's internal searcher in modules: [mod3]") + }) + + t.Run("should not register additional property modules providing faulty params", func(t *testing.T) { + // given + logger, _ := test.NewNullLogger() + modulesProvider := NewProvider(logger, config.Config{}) + schemaGetter := getFakeSchemaGetter() + modulesProvider.SetSchemaGetter(schemaGetter) + + // when + modulesProvider.Register(newGraphQLAdditionalModule("mod1"). + withArg("nearArgument"). + withGraphQLArg("semanticPath", []string{"semanticPath"}). + withRestApiArg("featureProjection", []string{"featureProjection", "fp", "f-p"}), + ) + modulesProvider.Register(newGraphQLAdditionalModule("mod2"). + withArg("nearArgument"). + withGraphQLArg("semanticPath", []string{"semanticPath"}). + withRestApiArg("featureProjection", []string{"featureProjection", "fp", "f-p"}), + ) + modulesProvider.Register(newGraphQLModule("mod3"). + withExtractFn("limit"). + withExtractFn("where"). + withExtractFn("nearVector"). + withExtractFn("nearObject"). + withExtractFn("group"), + ) + modulesProvider.Register(newGraphQLAdditionalModule("mod4"). + withGraphQLArg("classification", []string{"classification"}). + withRestApiArg("classification", []string{"classification"}). + withGraphQLArg("certainty", []string{"certainty"}). + withRestApiArg("certainty", []string{"certainty"}). + withGraphQLArg("id", []string{"id"}). 
+ withRestApiArg("id", []string{"id"}), + ) + err := modulesProvider.Init(context.Background(), nil, logger) + + // then + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "searcher: nearObject conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: nearVector conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: where conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: group conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "searcher: limit conflicts with weaviate's internal searcher in modules: [mod3]") + assert.Contains(t, err.Error(), "rest api additional property: classification conflicts with weaviate's internal searcher in modules: [mod4]") + assert.Contains(t, err.Error(), "rest api additional property: certainty conflicts with weaviate's internal searcher in modules: [mod4]") + assert.Contains(t, err.Error(), "rest api additional property: id conflicts with weaviate's internal searcher in modules: [mod4]") + assert.Contains(t, err.Error(), "graphql additional property: classification conflicts with weaviate's internal searcher in modules: [mod4]") + assert.Contains(t, err.Error(), "graphql additional property: certainty conflicts with weaviate's internal searcher in modules: [mod4]") + assert.Contains(t, err.Error(), "graphql additional property: id conflicts with weaviate's internal searcher in modules: [mod4]") + }) + + t.Run("should register module with alt names", func(t *testing.T) { + logger, _ := test.NewNullLogger() + module := &dummyBackupModuleWithAltNames{} + modulesProvider := NewProvider(logger, config.Config{}) + modulesProvider.Register(module) + + modByName := modulesProvider.GetByName("SomeBackend") + modByAltName1 := modulesProvider.GetByName("AltBackendName") + modByAltName2 := 
modulesProvider.GetByName("YetAnotherBackendName") + modMissing := modulesProvider.GetByName("DoesNotExist") + + assert.NotNil(t, modByName) + assert.NotNil(t, modByAltName1) + assert.NotNil(t, modByAltName2) + assert.Nil(t, modMissing) + }) + + t.Run("should provide backup backend", func(t *testing.T) { + logger, _ := test.NewNullLogger() + module := &dummyBackupModuleWithAltNames{} + modulesProvider := NewProvider(logger, config.Config{}) + modulesProvider.Register(module) + + provider, ok := interface{}(modulesProvider).(ubackup.BackupBackendProvider) + assert.True(t, ok) + + fmt.Printf("provider: %v\n", provider) + + backendByName, err1 := provider.BackupBackend("SomeBackend") + backendByAltName, err2 := provider.BackupBackend("YetAnotherBackendName") + + assert.NotNil(t, backendByName) + assert.Nil(t, err1) + assert.NotNil(t, backendByAltName) + assert.Nil(t, err2) + }) +} + +func fakeExtractFn(param map[string]interface{}) (interface{}, *dto.TargetCombination, error) { + extracted := map[string]interface{}{} + extracted["nearArgumentParam"] = []string{"fake"} + return extracted, nil, nil +} + +func fakeValidateFn(param interface{}) error { + return nil +} + +func newGraphQLModule(name string) *dummyGraphQLModule { + return &dummyGraphQLModule{ + dummyText2VecModuleNoCapabilities: newDummyText2VecModule(name, nil), + arguments: map[string]modulecapabilities.GraphQLArgument{}, + } +} + +type dummyGraphQLModule struct { + dummyText2VecModuleNoCapabilities + arguments map[string]modulecapabilities.GraphQLArgument +} + +func (m *dummyGraphQLModule) withArg(argName string) *dummyGraphQLModule { + arg := modulecapabilities.GraphQLArgument{ + GetArgumentsFunction: func(classname string) *graphql.ArgumentConfig { return &graphql.ArgumentConfig{} }, + ExploreArgumentsFunction: func() *graphql.ArgumentConfig { return &graphql.ArgumentConfig{} }, + ExtractFunction: fakeExtractFn, + ValidateFunction: fakeValidateFn, + } + m.arguments[argName] = arg + return m +} + +func 
(m *dummyGraphQLModule) withExtractFn(argName string) *dummyGraphQLModule { + arg := m.arguments[argName] + arg.ExtractFunction = fakeExtractFn + m.arguments[argName] = arg + return m +} + +func (m *dummyGraphQLModule) Arguments() map[string]modulecapabilities.GraphQLArgument { + return m.arguments +} + +func newGraphQLAdditionalModule(name string) *dummyAdditionalModule { + return &dummyAdditionalModule{ + dummyGraphQLModule: *newGraphQLModule(name), + additionalProperties: map[string]modulecapabilities.AdditionalProperty{}, + } +} + +type dummyAdditionalModule struct { + dummyGraphQLModule + additionalProperties map[string]modulecapabilities.AdditionalProperty +} + +func (m *dummyAdditionalModule) withArg(argName string) *dummyAdditionalModule { + m.dummyGraphQLModule.withArg(argName) + return m +} + +func (m *dummyAdditionalModule) withExtractFn(argName string) *dummyAdditionalModule { + arg := m.dummyGraphQLModule.arguments[argName] + arg.ExtractFunction = fakeExtractFn + m.dummyGraphQLModule.arguments[argName] = arg + return m +} + +func (m *dummyAdditionalModule) withGraphQLArg(argName string, values []string) *dummyAdditionalModule { + prop := m.additionalProperties[argName] + if prop.GraphQLNames == nil { + prop.GraphQLNames = []string{} + } + prop.GraphQLNames = append(prop.GraphQLNames, values...) + + m.additionalProperties[argName] = prop + return m +} + +func (m *dummyAdditionalModule) withRestApiArg(argName string, values []string) *dummyAdditionalModule { + prop := m.additionalProperties[argName] + if prop.RestNames == nil { + prop.RestNames = []string{} + } + prop.RestNames = append(prop.RestNames, values...) 
+ prop.DefaultValue = 100 + + m.additionalProperties[argName] = prop + return m +} + +func (m *dummyAdditionalModule) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty { + return m.additionalProperties +} + +func getFakeSchemaGetter() schemaGetter { + sch := enitiesSchema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ClassOne", + Vectorizer: "mod1", + ModuleConfig: map[string]interface{}{ + "mod": map[string]interface{}{ + "some-config": "some-config-value", + }, + }, + }, + { + Class: "ClassTwo", + Vectorizer: "mod2", + ModuleConfig: map[string]interface{}{ + "mod": map[string]interface{}{ + "some-config": "some-config-value", + }, + }, + }, + { + Class: "ClassThree", + Vectorizer: "mod3", + ModuleConfig: map[string]interface{}{ + "mod": map[string]interface{}{ + "some-config": "some-config-value", + }, + }, + }, + }, + }, + } + return &fakeSchemaGetter{schema: sch} +} + +type dummyBackupModuleWithAltNames struct{} + +func (m *dummyBackupModuleWithAltNames) Name() string { + return "SomeBackend" +} + +func (m *dummyBackupModuleWithAltNames) AltNames() []string { + return []string{"AltBackendName", "YetAnotherBackendName"} +} + +func (m *dummyBackupModuleWithAltNames) Init(ctx context.Context, params moduletools.ModuleInitParams) error { + return nil +} + +func (m *dummyBackupModuleWithAltNames) Type() modulecapabilities.ModuleType { + return modulecapabilities.Backup +} + +func (m *dummyBackupModuleWithAltNames) HomeDir(backupID, overrideBucket, overridePath string) string { + return "" +} + +func (m *dummyBackupModuleWithAltNames) AllBackups(context.Context) ([]*backup.DistributedBackupDescriptor, error) { + return nil, nil +} + +func (m *dummyBackupModuleWithAltNames) GetObject(ctx context.Context, backupID, key, overrideBucket, overridePath string) ([]byte, error) { + return nil, nil +} + +func (m *dummyBackupModuleWithAltNames) WriteToFile(ctx context.Context, backupID, key, destPath, overrideBucket, 
overridePath string) error { + return nil +} + +func (m *dummyBackupModuleWithAltNames) Write(ctx context.Context, backupID, key, overrideBucket, overridePath string, r io.ReadCloser) (int64, error) { + return 0, nil +} + +func (m *dummyBackupModuleWithAltNames) Read(ctx context.Context, backupID, key, overrideBucket, overridePath string, w io.WriteCloser) (int64, error) { + return 0, nil +} + +func (m *dummyBackupModuleWithAltNames) SourceDataPath() string { + return "" +} + +func (*dummyBackupModuleWithAltNames) IsExternal() bool { + return true +} + +func (m *dummyBackupModuleWithAltNames) PutObject(ctx context.Context, backupID, key, overrideBucket, overridePath string, byes []byte) error { + return nil +} + +func (m *dummyBackupModuleWithAltNames) PutFile(ctx context.Context, backupID, key, overrideBucket, overridePath, filePath string) error { + return nil +} + +func (m *dummyBackupModuleWithAltNames) Initialize(ctx context.Context, backupID, overrideBucket, overridePath string) error { + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/searchers_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/searchers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..edb20021f303e6f0f2068f7b8778c854c8476310 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/searchers_test.go @@ -0,0 +1,294 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/modulecomponents/generictypes" +) + +func TestModulesWithSearchers(t *testing.T) { + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "MyClass", + Vectorizer: "mod", + ModuleConfig: map[string]interface{}{ + "mod": map[string]interface{}{ + "some-config": "some-config-value", + }, + }, + }, + }, + }, + } + logger, _ := test.NewNullLogger() + + t.Run("get a vector for a class", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[]float32]("mod"). + withArg("nearGrape"). + withSearcher("nearGrape", generictypes.VectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[]float32], + cfg moduletools.ClassConfig, + ) ([]float32, error) { + // verify that the config tool is set, as this is a per-class search, + // so it must be set + assert.NotNil(t, cfg) + + // take the findVectorFn and append one dimension. 
This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return append(initial, 4), nil + })), + ) + p.Init(context.Background(), nil, logger) + + res, err := p.VectorFromSearchParam(context.Background(), "MyClass", "", "", + "nearGrape", nil, generictypes.FindVectorFn(fakeFindVector)) + + require.Nil(t, err) + assert.Equal(t, []float32{1, 2, 3, 4}, res) + }) + + t.Run("no module configured for a class", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[]float32]("mod"). + withArg("nearGrape"). + withSearcher("nearGrape", generictypes.VectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[]float32], + cfg moduletools.ClassConfig, + ) ([]float32, error) { + // verify that the config tool is set, as this is a per-class search, + // so it must be set + assert.NotNil(t, cfg) + + // take the findVectorFn and append one dimension. This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return append(initial, 4), nil + })), + ) + p.Init(context.Background(), nil, logger) + + _, err := p.VectorFromSearchParam(context.Background(), "MyClass", "", "", + "nearDoesNotExist", nil, generictypes.FindVectorFn(fakeFindVector)) + + require.NotNil(t, err) + assert.Contains(t, err.Error(), "could not vectorize input for collection") + }) + + t.Run("get a vector across classes", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[]float32]("mod"). + withArg("nearGrape"). 
+ withSearcher("nearGrape", generictypes.VectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[]float32], + cfg moduletools.ClassConfig, + ) ([]float32, error) { + // this is a cross-class search, such as is used for Explore{}, in this + // case we do not have class-based config, but we need at least pass + // a tenant information, that's why we pass an empty config with empty tenant + // so that it would be possible to perform cross class searches, without + // tenant context. Modules must be able to deal with this situation! + assert.NotNil(t, cfg) + assert.Equal(t, "", cfg.Tenant()) + + // take the findVectorFn and append one dimension. This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return append(initial, 4), nil + })), + ) + p.Init(context.Background(), nil, logger) + + res, targetVector, err := p.CrossClassVectorFromSearchParam(context.Background(), + "nearGrape", nil, generictypes.FindVectorFn(fakeFindVector)) + + require.Nil(t, err) + assert.Equal(t, []float32{1, 2, 3, 4}, res) + assert.Equal(t, "", targetVector) + }) + + t.Run("explore no vectorizer", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[]float32]("mod"). + withArg("nearGrape"). 
+ withSearcher("nearGrape", generictypes.VectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[]float32], + cfg moduletools.ClassConfig, + ) ([]float32, error) { + // this is a cross-class search, such as is used for Explore{}, in this + // case we do not have class-based config, but we need at least pass + // a tenant information, that's why we pass an empty config with empty tenant + // so that it would be possible to perform cross class searches, without + // tenant context. Modules must be able to deal with this situation! + assert.NotNil(t, cfg) + assert.Equal(t, "", cfg.Tenant()) + + // take the findVectorFn and append one dimension. This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return append(initial, 4), nil + })), + ) + p.Init(context.Background(), nil, logger) + + _, _, err := p.CrossClassVectorFromSearchParam(context.Background(), + "nearDoesNotExist", nil, generictypes.FindVectorFn(fakeFindVector)) + + require.NotNil(t, err) + }) + + t.Run("get a multi vector for a class", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[][]float32]("mod"). + withArg("nearGrape"). + withSearcher("nearGrape", generictypes.MultiVectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[][]float32], + cfg moduletools.ClassConfig, + ) ([][]float32, error) { + // verify that the config tool is set, as this is a per-class search, + // so it must be set + assert.NotNil(t, cfg) + + // take the findVectorFn and append one dimension. 
This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return initial, nil + })), + ) + p.Init(context.Background(), nil, logger) + + res, err := p.MultiVectorFromSearchParam(context.Background(), "MyClass", "", "", + "nearGrape", nil, generictypes.MultiFindVectorFn(multiFakeFindVector)) + + require.Nil(t, err) + assert.Equal(t, [][]float32{{0.1, 0.2, 0.3}, {0.11, 0.22, 0.33}}, res) + }) + + t.Run("get a multi vector across classes", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{ + schema: sch, + }) + p.Register(newSearcherModule[[][]float32]("mod"). + withArg("nearGrape"). + withSearcher("nearGrape", generictypes.MultiVectorForParams(func(ctx context.Context, params interface{}, + className string, + findVectorFn modulecapabilities.FindVectorFn[[][]float32], + cfg moduletools.ClassConfig, + ) ([][]float32, error) { + // this is a cross-class search, such as is used for Explore{}, in this + // case we do not have class-based config, but we need at least pass + // a tenant information, that's why we pass an empty config with empty tenant + // so that it would be possible to perform cross class searches, without + // tenant context. Modules must be able to deal with this situation! + assert.NotNil(t, cfg) + assert.Equal(t, "", cfg.Tenant()) + + // take the findVectorFn and append one dimension. 
This doesn't make too + // much sense, but helps verify that the modules method was used in the + // decisions + initial, _, _ := findVectorFn.FindVector(ctx, "class", "123", "", "") + return initial, nil + })), + ) + p.Init(context.Background(), nil, logger) + + res, targetVector, err := p.MultiCrossClassVectorFromSearchParam(context.Background(), + "nearGrape", nil, generictypes.MultiFindVectorFn(multiFakeFindVector)) + + require.Nil(t, err) + assert.Equal(t, [][]float32{{0.1, 0.2, 0.3}, {0.11, 0.22, 0.33}}, res) + assert.Equal(t, "", targetVector) + }) +} + +func fakeFindVector(ctx context.Context, className string, id strfmt.UUID, tenant, targetVector string) ([]float32, string, error) { + return []float32{1, 2, 3}, targetVector, nil +} + +func multiFakeFindVector(ctx context.Context, className string, id strfmt.UUID, tenant, targetVector string) ([][]float32, string, error) { + return [][]float32{{0.1, 0.2, 0.3}, {0.11, 0.22, 0.33}}, targetVector, nil +} + +func newSearcherModule[T dto.Embedding](name string) *dummySearcherModule[T] { + return &dummySearcherModule[T]{ + dummyGraphQLModule: newGraphQLModule(name), + searchers: map[string]modulecapabilities.VectorForParams[T]{}, + } +} + +type dummySearcherModule[T dto.Embedding] struct { + *dummyGraphQLModule + searchers map[string]modulecapabilities.VectorForParams[T] +} + +func (m *dummySearcherModule[T]) withArg(arg string) *dummySearcherModule[T] { + // call the super's withArg + m.dummyGraphQLModule.withArg(arg) + + // but don't return their return type but ours :) + return m +} + +// a helper for our test +func (m *dummySearcherModule[T]) withSearcher(arg string, + impl modulecapabilities.VectorForParams[T], +) *dummySearcherModule[T] { + m.searchers[arg] = impl + return m +} + +// public method to implement the modulecapabilities.Searcher interface +func (m *dummySearcherModule[T]) VectorSearches() map[string]modulecapabilities.VectorForParams[T] { + return m.searchers +} diff --git 
a/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer.go b/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer.go new file mode 100644 index 0000000000000000000000000000000000000000..fd696d25dc49ba456c88d8af5c6ebd7b61ed7fbb --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer.go @@ -0,0 +1,592 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "errors" + "fmt" + "runtime" + + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/modelsext" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/vectorindex/dynamic" + "github.com/weaviate/weaviate/entities/vectorindex/flat" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" +) + +var _NUMCPU = runtime.NumCPU() + +const ( + errorVectorizerCapability = "module %q exists, but does not provide the " + + "Vectorizer or ReferenceVectorizer capability" + + errorVectorIndexType = "vector index config (%T) is not of type HNSW, " + + "but objects manager is restricted to HNSW" + + warningVectorIgnored = "This vector will be ignored. If you meant to index " + + "the vector, make sure to set vectorIndexConfig.skip to 'false'. If the previous " + + "setting is correct, make sure you set vectorizer to 'none' in the schema and " + + "provide a null-vector (i.e. no vector) at import time." 
+ + warningSkipVectorGenerated = "this class is configured to skip vector indexing, " + + "but a vector was generated by the %q vectorizer. " + warningVectorIgnored + + warningSkipVectorProvided = "this class is configured to skip vector indexing, " + + "but a vector was explicitly provided. " + warningVectorIgnored +) + +func (p *Provider) ValidateVectorizer(moduleName string) error { + mod := p.GetByName(moduleName) + if mod == nil { + return fmt.Errorf("no module with name %q present", moduleName) + } + + okVec := p.implementsVectorizer(mod) + okRefVec := p.implementsReferenceVectorizer(mod) + if !okVec && !okRefVec { + return fmt.Errorf(errorVectorizerCapability, moduleName) + } + + return nil +} + +func (p *Provider) UsingRef2Vec(className string) bool { + class, err := p.getClass(className) + if err != nil { + return false + } + + cfg := class.ModuleConfig + if cfg == nil { + return false + } + + for modName := range cfg.(map[string]interface{}) { + if p.implementsReferenceVectorizer(p.GetByName(modName)) { + return true + } + } + + return false +} + +func (p *Provider) BatchUpdateVector(ctx context.Context, class *models.Class, objects []*models.Object, + findObjectFn modulecapabilities.FindObjectFn, + logger logrus.FieldLogger, +) (map[int]error, error) { + modConfigs, err := p.getModuleConfigs(class) + if err != nil { + return nil, err + } + + if len(modConfigs) == 0 { + // short-circuit collections without vector index + if class.Vectorizer == config.VectorizerModuleNone { + return nil, nil + } + + return nil, fmt.Errorf("no vectorizer configs for class %s", class.Class) + } + + var ( + vecErrorsList = make([]map[int]error, len(modConfigs)) + errorList = make([]error, len(modConfigs)) + counter = 0 + ) + + eg := enterrors.NewErrorGroupWrapper(logger) + eg.SetLimit(_NUMCPU) + for targetVector, modConfig := range modConfigs { + shouldVectorizeClass, err := p.shouldVectorizeClass(class, targetVector, logger) + if err != nil { + errorList[counter] = err + 
continue + } + if shouldVectorizeClass { + targetVector := targetVector + modConfig := modConfig + counter := counter + + fun := func() error { + vecErrors, err := p.batchUpdateVector(ctx, objects, class, findObjectFn, targetVector, modConfig) + errorList[counter] = err + vecErrorsList[counter] = vecErrors + return nil // to use error group + } + eg.Go(fun) + } + + counter += 1 + } + if err := eg.Wait(); err != nil { + return nil, err + } + + // combine errors from different runs + combinedErrors := make(map[int]error, 0) + for _, vecErrors := range vecErrorsList { + for i, vecError := range vecErrors { + if existingErr, ok := combinedErrors[i]; ok { + vecError = errors.Join(existingErr, vecError) + } + combinedErrors[i] = vecError + } + } + + return combinedErrors, errors.Join(errorList...) +} + +func (p *Provider) shouldVectorizeClass(class *models.Class, targetVector string, logger logrus.FieldLogger) (bool, error) { + hnswConfig, err := p.getVectorIndexConfig(class, targetVector) + if err != nil { + return false, err + } + + vectorizer := p.getVectorizer(class, targetVector) + if vectorizer == config.VectorizerModuleNone { + return false, nil + } + + if hnswConfig.Skip { + logger.WithField("className", class.Class). + WithField("vectorizer", vectorizer). 
+ Warningf(warningSkipVectorGenerated, vectorizer) + } + return true, nil +} + +func (p *Provider) batchUpdateVector(ctx context.Context, objects []*models.Object, class *models.Class, + findObjectFn modulecapabilities.FindObjectFn, + targetVector string, modConfig map[string]interface{}, +) (map[int]error, error) { + found := p.getModule(modConfig) + if found == nil { + return nil, fmt.Errorf("no vectorizer found for class %q", class.Class) + } + cfg := NewClassBasedModuleConfig(class, found.Name(), "", targetVector, &p.cfg) + + if vectorizer, ok := found.(modulecapabilities.Vectorizer[[]float32]); ok { + // each target vector can have its own associated properties, and we need to determine for each one if we should + // skip it or not. To simplify things, we create a boolean slice that indicates for each object if the given + // vectorizer needs to act on it or not. This allows us to use the same objects slice for all vectorizers and + // simplifies the mapping of the returned vectors to the objects. 
+ skipRevectorization := make([]bool, len(objects)) + for i, obj := range objects { + if !p.shouldVectorizeObject(obj, cfg) { + skipRevectorization[i] = true + continue + } + reVectorize, addProps, vector, err := reVectorize(ctx, cfg, vectorizer, obj, + class, nil, targetVector, findObjectFn, p.cfg.RevectorizeCheckDisabled.Get()) + if err != nil { + return nil, fmt.Errorf("cannot vectorize class %q: %w", class.Class, err) + } + if !reVectorize { + skipRevectorization[i] = true + p.lockGuard(func() { + p.addVectorToObject(obj, vector, nil, addProps, cfg) + }) + } + } + vectors, addProps, vecErrors := vectorizer.VectorizeBatch(ctx, objects, skipRevectorization, cfg) + for i := range objects { + if _, ok := vecErrors[i]; ok || skipRevectorization[i] { + continue + } + + var addProp models.AdditionalProperties = nil + if addProps != nil { // only present for contextionary and probably nobody is using this + addProp = addProps[i] + } + + p.lockGuard(func() { + p.addVectorToObject(objects[i], vectors[i], nil, addProp, cfg) + }) + } + + return vecErrors, nil + } else if vectorizer, ok := found.(modulecapabilities.Vectorizer[[][]float32]); ok { + // each target vector can have its own associated properties, and we need to determine for each one if we should + // skip it or not. To simplify things, we create a boolean slice that indicates for each object if the given + // vectorizer needs to act on it or not. This allows us to use the same objects slice for all vectorizers and + // simplifies the mapping of the returned vectors to the objects. 
+ skipRevectorization := make([]bool, len(objects)) + for i, obj := range objects { + if !p.shouldVectorizeObject(obj, cfg) { + skipRevectorization[i] = true + continue + } + reVectorize, addProps, multiVector, err := reVectorizeMulti(ctx, cfg, + vectorizer, obj, class, nil, targetVector, findObjectFn, + p.cfg.RevectorizeCheckDisabled.Get()) + if err != nil { + return nil, fmt.Errorf("cannot vectorize class %q: %w", class.Class, err) + } + if !reVectorize { + skipRevectorization[i] = true + p.lockGuard(func() { + p.addVectorToObject(obj, nil, multiVector, addProps, cfg) + }) + } + } + multiVectors, addProps, vecErrors := vectorizer.VectorizeBatch(ctx, objects, skipRevectorization, cfg) + for i := range objects { + if _, ok := vecErrors[i]; ok || skipRevectorization[i] { + continue + } + + var addProp models.AdditionalProperties = nil + if addProps != nil { // only present for contextionary and probably nobody is using this + addProp = addProps[i] + } + + p.lockGuard(func() { + p.addVectorToObject(objects[i], nil, multiVectors[i], addProp, cfg) + }) + } + + return vecErrors, nil + } else { + refVectorizer := found.(modulecapabilities.ReferenceVectorizer[[]float32]) + errs := make(map[int]error, 0) + for i, obj := range objects { + vector, err := refVectorizer.VectorizeObject(ctx, obj, cfg, findObjectFn) + if err != nil { + errs[i] = fmt.Errorf("update reference vector: %w", err) + } + p.lockGuard(func() { + p.addVectorToObject(obj, vector, nil, nil, cfg) + }) + } + return errs, nil + } +} + +func (p *Provider) UpdateVector(ctx context.Context, object *models.Object, class *models.Class, + findObjectFn modulecapabilities.FindObjectFn, + logger logrus.FieldLogger, +) error { + eg := enterrors.NewErrorGroupWrapper(logger) + eg.SetLimit(_NUMCPU) + + modConfigs, err := p.getModuleConfigs(class) + if err != nil { + return err + } + + for targetVector, modConfig := range modConfigs { + targetVector := targetVector // https://golang.org/doc/faq#closures_and_goroutines + 
modConfig := modConfig // https://golang.org/doc/faq#closures_and_goroutines + eg.Go(func() error { + return p.vectorizeOne(ctx, object, class, findObjectFn, targetVector, modConfig, logger) + }, targetVector) + } + if err = eg.Wait(); err != nil { + return err + } + return nil +} + +func (p *Provider) lockGuard(mutate func()) { + p.vectorsLock.Lock() + defer p.vectorsLock.Unlock() + mutate() +} + +func (p *Provider) addVectorToObject(object *models.Object, + vector []float32, multiVector [][]float32, + additional models.AdditionalProperties, cfg moduletools.ClassConfig, +) { + if len(additional) > 0 { + if object.Additional == nil { + object.Additional = models.AdditionalProperties{} + } + for additionalName, additionalValue := range additional { + object.Additional[additionalName] = additionalValue + } + } + if cfg.TargetVector() == "" { + object.Vector = vector + return + } + if object.Vectors == nil { + object.Vectors = models.Vectors{} + } + if multiVector != nil { + object.Vectors[cfg.TargetVector()] = multiVector + } else { + object.Vectors[cfg.TargetVector()] = vector + } +} + +func (p *Provider) vectorizeOne(ctx context.Context, object *models.Object, class *models.Class, + findObjectFn modulecapabilities.FindObjectFn, + targetVector string, modConfig map[string]interface{}, + logger logrus.FieldLogger, +) error { + vectorize, err := p.shouldVectorize(object, class, targetVector, logger) + if err != nil { + return fmt.Errorf("vectorize check for target vector %s: %w", targetVector, err) + } + if vectorize { + if err := p.vectorize(ctx, object, class, findObjectFn, targetVector, modConfig); err != nil { + return fmt.Errorf("vectorize target vector %s: %w", targetVector, err) + } + } + return nil +} + +func (p *Provider) vectorize(ctx context.Context, object *models.Object, class *models.Class, + findObjectFn modulecapabilities.FindObjectFn, + targetVector string, modConfig map[string]interface{}, +) error { + found := p.getModule(modConfig) + if found == 
nil { + return fmt.Errorf( + "no vectorizer found for class %q", object.Class) + } + + cfg := NewClassBasedModuleConfig(class, found.Name(), "", targetVector, &p.cfg) + + if vectorizer, ok := found.(modulecapabilities.Vectorizer[[]float32]); ok { + if p.shouldVectorizeObject(object, cfg) { + var targetProperties []string + vecConfig, ok := modConfig[found.Name()] + if ok { + if properties, ok := vecConfig.(map[string]interface{})["properties"]; ok { + if propSlice, ok := properties.([]string); ok { + targetProperties = propSlice + } + } + } + needsRevectorization, additionalProperties, vector, err := reVectorize(ctx, + cfg, vectorizer, object, class, targetProperties, targetVector, findObjectFn, + p.cfg.RevectorizeCheckDisabled.Get()) + if err != nil { + return fmt.Errorf("cannot revectorize class %q: %w", object.Class, err) + } + if needsRevectorization { + var err error + vector, additionalProperties, err = vectorizer.VectorizeObject(ctx, object, cfg) + if err != nil { + return fmt.Errorf("update vector: %w", err) + } + } + + p.lockGuard(func() { + p.addVectorToObject(object, vector, nil, additionalProperties, cfg) + }) + return nil + } + } else if vectorizer, ok := found.(modulecapabilities.Vectorizer[[][]float32]); ok { + if p.shouldVectorizeObject(object, cfg) { + var targetProperties []string + vecConfig, ok := modConfig[found.Name()] + if ok { + if properties, ok := vecConfig.(map[string]interface{})["properties"]; ok { + if propSlice, ok := properties.([]string); ok { + targetProperties = propSlice + } + } + } + needsRevectorization, additionalProperties, multiVector, err := reVectorizeMulti(ctx, + cfg, vectorizer, object, class, targetProperties, targetVector, findObjectFn, + p.cfg.RevectorizeCheckDisabled.Get()) + if err != nil { + return fmt.Errorf("cannot revectorize class %q: %w", object.Class, err) + } + if needsRevectorization { + var err error + multiVector, additionalProperties, err = vectorizer.VectorizeObject(ctx, object, cfg) + if err != nil { + 
return fmt.Errorf("update vector: %w", err) + } + } + + p.lockGuard(func() { + p.addVectorToObject(object, nil, multiVector, additionalProperties, cfg) + }) + return nil + } + } else { + refVectorizer := found.(modulecapabilities.ReferenceVectorizer[[]float32]) + vector, err := refVectorizer.VectorizeObject(ctx, object, cfg, findObjectFn) + if err != nil { + return fmt.Errorf("update reference vector: %w", err) + } + p.lockGuard(func() { + p.addVectorToObject(object, vector, nil, nil, cfg) + }) + } + return nil +} + +func (p *Provider) shouldVectorizeObject(object *models.Object, cfg moduletools.ClassConfig) bool { + if cfg.TargetVector() == "" { + return object.Vector == nil + } + + targetVectorExists := false + p.lockGuard(func() { + vec, ok := object.Vectors[cfg.TargetVector()] + isVectorEmpty, _ := dto.IsVectorEmpty(vec) + targetVectorExists = ok && !isVectorEmpty + }) + return !targetVectorExists +} + +func (p *Provider) shouldVectorize(object *models.Object, class *models.Class, + targetVector string, logger logrus.FieldLogger, +) (bool, error) { + hnswConfig, err := p.getVectorIndexConfig(class, targetVector) + if err != nil { + return false, err + } + + vectorizer := p.getVectorizer(class, targetVector) + if vectorizer == config.VectorizerModuleNone { + vector := p.getVector(object, targetVector) + isEmpty, err := dto.IsVectorEmpty(vector) + if err != nil { + return false, fmt.Errorf("should vectorize: is vector empty: %w", err) + } + if hnswConfig.Skip && !isEmpty { + logger.WithField("className", class.Class). + Warningf(warningSkipVectorProvided) + } + return false, nil + } + + if hnswConfig.Skip { + logger.WithField("className", class.Class). + WithField("vectorizer", vectorizer). 
+ Warningf(warningSkipVectorGenerated, vectorizer) + } + return true, nil +} + +func (p *Provider) getVectorizer(class *models.Class, targetVector string) string { + if targetVector != "" && len(class.VectorConfig) > 0 { + if vectorConfig, ok := class.VectorConfig[targetVector]; ok { + if vectorizer, ok := vectorConfig.Vectorizer.(map[string]interface{}); ok && len(vectorizer) == 1 { + for vectorizerName := range vectorizer { + return vectorizerName + } + } + } + return "" + } + return class.Vectorizer +} + +func (p *Provider) getVector(object *models.Object, targetVector string) models.Vector { + p.vectorsLock.Lock() + defer p.vectorsLock.Unlock() + if targetVector != "" { + if len(object.Vectors) == 0 { + return nil + } + return object.Vectors[targetVector] + } + return object.Vector +} + +func (p *Provider) getVectorIndexConfig(class *models.Class, targetVector string) (hnsw.UserConfig, error) { + vectorIndexConfig := class.VectorIndexConfig + if targetVector != "" { + vectorIndexConfig = class.VectorConfig[targetVector].VectorIndexConfig + } + hnswConfig, okHnsw := vectorIndexConfig.(hnsw.UserConfig) + _, okFlat := vectorIndexConfig.(flat.UserConfig) + _, okDynamic := vectorIndexConfig.(dynamic.UserConfig) + if !(okHnsw || okFlat || okDynamic) { + return hnsw.UserConfig{}, fmt.Errorf(errorVectorIndexType, vectorIndexConfig) + } + return hnswConfig, nil +} + +func (p *Provider) getModuleConfigs(class *models.Class) (map[string]map[string]interface{}, error) { + modConfigs := map[string]map[string]interface{}{} + // get all named vectorizers for classs + for name, vectorConfig := range class.VectorConfig { + modConfig, ok := vectorConfig.Vectorizer.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("class %v vectorizer %s not present", class.Class, name) + } + modConfigs[name] = modConfig + } + + if modelsext.ClassHasLegacyVectorIndex(class) && class.Vectorizer != config.VectorizerModuleNone { + if modConfig, ok := 
class.ModuleConfig.(map[string]interface{}); ok { + modConfigs[""] = modConfig + } else { + return nil, fmt.Errorf("no moduleconfig for class %v present", class.Class) + } + } + + return modConfigs, nil +} + +func (p *Provider) getModule(modConfig map[string]interface{}) (found modulecapabilities.Module) { + for modName := range modConfig { + if err := p.ValidateVectorizer(modName); err == nil { + found = p.GetByName(modName) + break + } + } + return +} + +func (p *Provider) VectorizerName(className string) (string, error) { + name, _, err := p.getClassVectorizer(className) + if err != nil { + return "", err + } + return name, nil +} + +func (p *Provider) getClassVectorizer(className string) (string, interface{}, error) { + class := p.schemaGetter.ReadOnlyClass(className) + if class == nil { + // this should be impossible by the time this method gets called, but let's + // be 100% certain + return "", nil, fmt.Errorf("class %s not present", className) + } + + return class.Vectorizer, class.VectorIndexConfig, nil +} + +func (p *Provider) implementsVectorizer(mod modulecapabilities.Module) bool { + switch mod.(type) { + case modulecapabilities.Vectorizer[[]float32], modulecapabilities.Vectorizer[[][]float32]: + return true + default: + return false + } +} + +func (p *Provider) implementsReferenceVectorizer(mod modulecapabilities.Module) bool { + switch mod.(type) { + case modulecapabilities.ReferenceVectorizer[[]float32], modulecapabilities.ReferenceVectorizer[[][]float32]: + return true + default: + return false + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer_test.go b/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..77c5d3412e0f24ce8c1f8acb5c49a415d050a5c3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/modules/vectorizer_test.go @@ -0,0 +1,318 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / 
/ |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package modules + +import ( + "context" + "fmt" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/config" +) + +func TestProvider_ValidateVectorizer(t *testing.T) { + logger, _ := test.NewNullLogger() + t.Run("with vectorizer module", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + vec := newDummyModule("some-module", modulecapabilities.Text2Vec) + p.Register(vec) + + err := p.ValidateVectorizer(vec.Name()) + assert.Nil(t, err) + }) + + t.Run("with reference vectorizer module", func(t *testing.T) { + p := NewProvider(logger, config.Config{}) + refVec := newDummyModule("some-module", modulecapabilities.Ref2Vec) + p.Register(refVec) + + err := p.ValidateVectorizer(refVec.Name()) + assert.Nil(t, err) + }) + + t.Run("with non-vectorizer module", func(t *testing.T) { + modName := "some-module" + p := NewProvider(logger, config.Config{}) + nonVec := newDummyModule(modName, "") + p.Register(nonVec) + + expectedErr := fmt.Sprintf( + "module %q exists, but does not provide the Vectorizer or ReferenceVectorizer capability", + modName) + err := p.ValidateVectorizer(nonVec.Name()) + assert.EqualError(t, err, expectedErr) + }) + + t.Run("with unregistered module", func(t *testing.T) { + modName := "does-not-exist" + p := NewProvider(logger, config.Config{}) + expectedErr := fmt.Sprintf( + "no module with name %q present", + modName) + err := 
p.ValidateVectorizer(modName) + assert.EqualError(t, err, expectedErr) + }) +} + +func TestProvider_UsingRef2Vec(t *testing.T) { + logger, _ := test.NewNullLogger() + t.Run("with ReferenceVectorizer", func(t *testing.T) { + modName := "some-module" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Ref2Vec) + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: struct{}{}, + }, + }}, + }} + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + p.Register(mod) + assert.True(t, p.UsingRef2Vec(className)) + }) + + t.Run("with Vectorizer", func(t *testing.T) { + modName := "some-module" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Text2Vec) + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: struct{}{}, + }, + }}, + }} + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + p.Register(mod) + assert.False(t, p.UsingRef2Vec(className)) + }) + + t.Run("with nonexistent class", func(t *testing.T) { + className := "SomeClass" + mod := newDummyModule("", "") + + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{schema.Schema{}}) + p.Register(mod) + assert.False(t, p.UsingRef2Vec(className)) + }) + + t.Run("with empty class module config", func(t *testing.T) { + modName := "some-module" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Text2Vec) + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{{ + Class: className, + }}, + }} + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + p.Register(mod) + assert.False(t, p.UsingRef2Vec(className)) + }) + + t.Run("with unregistered module", func(t *testing.T) { + modName := "some-module" + className := 
"SomeClass" + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: struct{}{}, + }, + }}, + }} + p := NewProvider(logger, config.Config{}) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + assert.False(t, p.UsingRef2Vec(className)) + }) +} + +func TestProvider_UpdateVector(t *testing.T) { + t.Run("with Vectorizer", func(t *testing.T) { + ctx := context.Background() + modName := "some-vzr" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Text2Vec) + class := models.Class{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: map[string]interface{}{}, + }, + Vectorizer: "text2vec-contextionary", + VectorIndexConfig: hnsw.UserConfig{}, + } + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{&class}, + }, + } + repo := &fakeObjectsRepo{} + logger, _ := test.NewNullLogger() + + p := NewProvider(logger, config.Config{}) + p.Register(mod) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + + obj := &models.Object{Class: className, ID: newUUID()} + err := p.UpdateVector(ctx, obj, &class, repo.Object, logger) + assert.Nil(t, err) + }) + + t.Run("with missing vectorizer modconfig", func(t *testing.T) { + ctx := context.Background() + class := &models.Class{ + Class: "SomeClass", + VectorIndexConfig: hnsw.UserConfig{}, + Vectorizer: "text2vec-contextionary", + } + mod := newDummyModule("", "") + logger, _ := test.NewNullLogger() + + p := NewProvider(logger, config.Config{}) + p.Register(mod) + p.SetSchemaGetter(&fakeSchemaGetter{schema.Schema{}}) + + obj := &models.Object{Class: class.Class, ID: newUUID()} + err := p.UpdateVector(ctx, obj, class, (&fakeObjectsRepo{}).Object, logger) + expectedErr := fmt.Sprintf("no moduleconfig for class %v present", class.Class) + assert.EqualError(t, err, expectedErr) + }) + + t.Run("with no vectors configuration", func(t *testing.T) { + ctx := context.Background() + class := 
&models.Class{ + Class: "SomeClass", + Vectorizer: "none", + } + + logger, _ := test.NewNullLogger() + p := NewProvider(logger, config.Config{}) + + obj := &models.Object{Class: class.Class, ID: newUUID()} + err := p.UpdateVector(ctx, obj, class, (&fakeObjectsRepo{}).Object, logger) + require.NoError(t, err) + }) + + t.Run("with ReferenceVectorizer", func(t *testing.T) { + ctx := context.Background() + modName := "some-vzr" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Ref2Vec) + class := &models.Class{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: struct{}{}, + }, + Vectorizer: "text2vec-contextionary", + VectorIndexConfig: hnsw.UserConfig{}, + } + + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{class}, + }} + repo := &fakeObjectsRepo{} + logger, _ := test.NewNullLogger() + + p := NewProvider(logger, config.Config{}) + p.Register(mod) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + + obj := &models.Object{Class: className, ID: newUUID()} + err := p.UpdateVector(ctx, obj, class, repo.Object, logger) + assert.Nil(t, err) + }) + + t.Run("with nonexistent vector index config type", func(t *testing.T) { + ctx := context.Background() + modName := "some-vzr" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Ref2Vec) + class := &models.Class{ + Class: className, + ModuleConfig: map[string]interface{}{ + modName: struct{}{}, + }, + Vectorizer: "text2vec-contextionary", + VectorIndexConfig: struct{}{}, + } + sch := schema.Schema{Objects: &models.Schema{ + Classes: []*models.Class{class}, + }} + repo := &fakeObjectsRepo{} + logger, _ := test.NewNullLogger() + + p := NewProvider(logger, config.Config{}) + p.Register(mod) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + + obj := &models.Object{Class: className, ID: newUUID()} + + err := p.UpdateVector(ctx, obj, class, repo.Object, logger) + expectedErr := "vector index config (struct {}) is not of type HNSW, " + + "but 
objects manager is restricted to HNSW" + require.ErrorContains(t, err, expectedErr) + }) + + t.Run("with ColBERT Vectorizer", func(t *testing.T) { + ctx := context.Background() + modName := "colbert" + className := "SomeClass" + mod := newDummyModule(modName, modulecapabilities.Text2Multivec) + class := models.Class{ + Class: className, + VectorConfig: map[string]models.VectorConfig{ + "colbert": { + Vectorizer: map[string]interface{}{modName: map[string]interface{}{}}, + VectorIndexConfig: hnsw.UserConfig{Multivector: hnsw.MultivectorConfig{Enabled: true}}, + VectorIndexType: "hnsw", + }, + }, + } + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{&class}, + }, + } + repo := &fakeObjectsRepo{} + logger, _ := test.NewNullLogger() + + p := NewProvider(logger, config.Config{}) + p.Register(mod) + p.SetSchemaGetter(&fakeSchemaGetter{sch}) + + obj := &models.Object{Class: className, ID: newUUID()} + err := p.UpdateVector(ctx, obj, &class, repo.Object, logger) + assert.NoError(t, err) + assert.NotEmpty(t, obj.Vectors) + assert.Equal(t, [][]float32{{0.11, 0.22, 0.33}, {0.11, 0.22, 0.33}}, obj.Vectors["colbert"]) + }) +} + +func newUUID() strfmt.UUID { + return strfmt.UUID(uuid.NewString()) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/grpc.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/grpc.go new file mode 100644 index 0000000000000000000000000000000000000000..65450300e9d8bbb496154bc46e2928293333e05f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/grpc.go @@ -0,0 +1,196 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package monitoring

import (
	"context"
	"errors"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

// Make sure `GrpcStatsHandler` always implements `stats.Handler`.
var _ stats.Handler = &GrpcStatsHandler{}

// key is an unexported context-key type so values stored by this package
// cannot collide with context keys from other packages.
type key int

const (
	keyMethodName key = 1
	keyRouteName  key = 2
)

// InstrumentGrpc accepts server metrics and returns the `[]grpc.ServerOption`
// that you can pass to any `grpc.Server` to get these metrics instrumented
// automatically.
//
// ```
//
//	svrMetrics := monitoring.NewGRPCServerMetrics(metrics, prometheus.DefaultRegisterer)
//	grpcServer := grpc.NewServer(monitoring.InstrumentGrpc(*svrMetrics)...)
//
//	grpcServer.Serve(listener)
//
// ```
func InstrumentGrpc(svrMetrics *GRPCServerMetrics) []grpc.ServerOption {
	// Stats handler covers inflight gauge and request/response payload sizes.
	grpcOptions := []grpc.ServerOption{
		grpc.StatsHandler(NewGrpcStatsHandler(
			svrMetrics.InflightRequests,
			svrMetrics.RequestBodySize,
			svrMetrics.ResponseBodySize,
		)),
	}

	// Interceptors cover request duration for unary and streaming calls.
	grpcInterceptUnary := grpc.ChainUnaryInterceptor(
		UnaryServerInstrument(svrMetrics.RequestDuration),
	)
	grpcOptions = append(grpcOptions, grpcInterceptUnary)

	grpcInterceptStream := grpc.ChainStreamInterceptor(
		StreamServerInstrument(svrMetrics.RequestDuration),
	)
	grpcOptions = append(grpcOptions, grpcInterceptStream)

	return grpcOptions
}

// NewGrpcStatsHandler builds a GrpcStatsHandler that records inflight request
// counts and request/response payload sizes into the given metric vectors.
func NewGrpcStatsHandler(inflight *prometheus.GaugeVec, requestSize *prometheus.HistogramVec, responseSize *prometheus.HistogramVec) *GrpcStatsHandler {
	return &GrpcStatsHandler{
		inflightRequests: inflight,
		requestSize:      requestSize,
		responseSize:     responseSize,
	}
}

// GrpcStatsHandler is a stats.Handler that tracks inflight requests and
// payload sizes, labelled by gRPC service and method.
type GrpcStatsHandler struct {
	inflightRequests *prometheus.GaugeVec

	// in bytes
	requestSize  *prometheus.HistogramVec
	responseSize *prometheus.HistogramVec
}

// TagRPC stores the full method name in the context so HandleRPC can label
// its metrics later in the RPC lifecycle.
func (g *GrpcStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return context.WithValue(ctx, keyMethodName, info.FullMethodName)
}

// HandleRPC processes lifecycle events of a single RPC: Begin/End maintain the
// inflight gauge, In/OutPayload observe wire sizes. Events without a tagged
// method name (no prior TagRPC) are ignored.
func (g *GrpcStatsHandler) HandleRPC(ctx context.Context, rpcStats stats.RPCStats) {
	fullMethodName, ok := ctx.Value(keyMethodName).(string)
	if !ok {
		return
	}

	service, method := splitFullMethodName(fullMethodName)

	switch s := rpcStats.(type) {
	case *stats.Begin:
		g.inflightRequests.WithLabelValues(service, method).Inc()
	case *stats.End:
		g.inflightRequests.WithLabelValues(service, method).Dec()
	case *stats.InHeader:
		// Ignore incoming headers.
	case *stats.InPayload:
		g.requestSize.WithLabelValues(service, method).Observe(float64(s.WireLength))
	case *stats.InTrailer:
		// Ignore incoming trailers.
	case *stats.OutHeader:
		// Ignore outgoing headers.
	case *stats.OutPayload:
		g.responseSize.WithLabelValues(service, method).Observe(float64(s.WireLength))
	case *stats.OutTrailer:
		// Ignore outgoing trailers. OutTrailer doesn't have valid WireLength (there is a deprecated field, always set to 0).
+ } +} + +func (g *GrpcStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +func (g *GrpcStatsHandler) HandleConn(_ context.Context, _ stats.ConnStats) { + // Don't need +} + +func UnaryServerInstrument(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + begin := time.Now() + resp, err := handler(ctx, req) + observe(hist, info.FullMethod, err, time.Since(begin)) + return resp, err + } +} + +func StreamServerInstrument(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + begin := time.Now() + err := handler(srv, ss) + observe(hist, info.FullMethod, err, time.Since(begin)) + return err + } +} + +func observe(hist *prometheus.HistogramVec, fullMethod string, err error, duration time.Duration) { + service, method := splitFullMethodName(fullMethod) + + // `hist` has following labels + // service - gRPC service name (e.g: weaviate.v1.Weaviate, weaviate.internal.cluster.ClusterService) + // method - Method from the gRPC service that got invoked. 
(e.g: Search, RemovePeer) + // status - grpc status (e.g: "OK", "CANCELED", "UNKNOWN", etc) + + labelValues := []string{ + service, + method, + errorToStatus(err), + } + hist.WithLabelValues(labelValues...).Observe(duration.Seconds()) +} + +func errorToStatus(err error) string { + code := errorToGrpcCode(err) + return code.String() +} + +func errorToGrpcCode(err error) codes.Code { + if err == nil { + return codes.OK + } + + if errors.Is(err, context.Canceled) { + return codes.Canceled + } + + type grpcStatus interface { + GRPCStatus() *status.Status + } + + var g grpcStatus + if errors.As(err, &g) { + st := g.GRPCStatus() + if st != nil { + return st.Code() + } + } + return codes.Unknown +} + +// splitFullMethodName converts full gRPC method call into `service` and `method` +// e.g: "/weaviate.v1.Weaviate/Search" -> "weaviate.v1.Weaviate", "/Search" +func splitFullMethodName(fullMethod string) (string, string) { + fullMethod = strings.TrimPrefix(fullMethod, "/") // remove leading slash + if i := strings.Index(fullMethod, "/"); i >= 0 { + return fullMethod[:i], fullMethod[i+1:] + } + return "unknown", "unknown" +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/http.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/http.go new file mode 100644 index 0000000000000000000000000000000000000000..61a4283873d03fc058eaf9cf47d720d47f2b17e3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/http.go @@ -0,0 +1,114 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+//
+// CONTACT: hello@weaviate.io
+//
+
+package monitoring
+
+import (
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/felixge/httpsnoop"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StaticRouteLabel takes any http request and returns a canonical and static route label
+// that can be used in metrics without worrying about unbounded cardinality.
+//
+// Examples:
+// `/schema/Movies/properties` -> `/schema/{className}`
+// `/replicas/indices/Movies/shards/hello0/objects` -> `/replicas/indices`
+type StaticRouteLabel func(r *http.Request) (*http.Request, string)
+
+type InstrumentHandler struct {
+	inflightRequests *prometheus.GaugeVec
+	duration         *prometheus.HistogramVec
+
+	// request/response body sizes, in bytes
+	requestSize  *prometheus.HistogramVec
+	responseSize *prometheus.HistogramVec
+
+	// next is original http handler we instrument
+	next http.Handler
+
+	routeLabel StaticRouteLabel
+}
+
+func InstrumentHTTP(
+	next http.Handler,
+	routeLabel StaticRouteLabel,
+	inflight *prometheus.GaugeVec,
+	duration *prometheus.HistogramVec,
+	requestSize *prometheus.HistogramVec,
+	responseSize *prometheus.HistogramVec,
+) *InstrumentHandler {
+	return &InstrumentHandler{
+		next:             next,
+		routeLabel:       routeLabel,
+		inflightRequests: inflight,
+		duration:         duration,
+		requestSize:      requestSize,
+		responseSize:     responseSize,
+	}
+}
+
+func (i *InstrumentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	r, route := i.routeLabel(r)
+	method := r.Method
+
+	inflight := i.inflightRequests.WithLabelValues(method, route)
+	inflight.Inc()
+	defer inflight.Dec()
+
+	origBody := r.Body
+	defer func() {
+		// We don't need `countingReadCloser` before this instrument handler
+		r.Body = origBody
+	}()
+
+	cr := &countingReadCloser{
+		r: r.Body,
+	}
+	r.Body = cr
+
+	// This is where we run actual upstream http.Handler
+	respWithMetrics := httpsnoop.CaptureMetricsFn(w, func(rw http.ResponseWriter) {
+		i.next.ServeHTTP(rw, r)
+	})
+
+	i.requestSize.WithLabelValues(method, route).Observe(float64(cr.read))
+	i.responseSize.WithLabelValues(method, route).Observe(float64(respWithMetrics.Written))
+
+	labelValues := []string{
+		method,
+		route,
+		strconv.Itoa(respWithMetrics.Code),
+	}
+
+	i.duration.WithLabelValues(labelValues...).Observe(respWithMetrics.Duration.Seconds())
+}
+
+type countingReadCloser struct {
+	r    io.ReadCloser
+	read int64
+}
+
+func (c *countingReadCloser) Read(p []byte) (int, error) {
+	n, err := c.r.Read(p)
+	if n > 0 {
+		c.read += int64(n)
+	}
+	return n, err
+}
+
+func (c *countingReadCloser) Close() error {
+	return c.r.Close()
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen.go
new file mode 100644
index 0000000000000000000000000000000000000000..df0f71eabe19f10006e5ca146a4d5b7c6034f376
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen.go
@@ -0,0 +1,54 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package monitoring
+
+import (
+	"net"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type countingListener struct {
+	net.Listener
+	count prometheus.Gauge
+}
+
+func CountingListener(l net.Listener, g prometheus.Gauge) net.Listener {
+	return &countingListener{Listener: l, count: g}
+}
+
+func (c *countingListener) Accept() (net.Conn, error) {
+	conn, err := c.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	c.count.Inc()
+	return &countingConn{Conn: conn, count: c.count}, nil
+}
+
+type countingConn struct {
+	net.Conn
+	count prometheus.Gauge
+	once  sync.Once
+}
+
+func (c *countingConn) Close() error {
+	err := c.Conn.Close()
+
+	// Client can call `Close()` any number of times on a single connection. Make sure to decrement the counter only once.
+	c.once.Do(func() {
+		c.count.Dec()
+	})
+
+	return err
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen_test.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1db31ab19800a5c041fad0a65223a9793bc4959a
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/listen_test.go
@@ -0,0 +1,87 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package monitoring
+
+import (
+	"errors"
+	"net"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/stretchr/testify/assert"
+)
+
+type fakeListener struct {
+	net.Listener
+	acceptErr error
+	closeErr  error
+}
+
+type fakeConn struct {
+	net.Conn
+	closeErr error
+}
+
+func (c *fakeConn) Close() error {
+	return c.closeErr
+}
+
+func (c *fakeListener) Accept() (net.Conn, error) {
+	return &fakeConn{closeErr: c.closeErr}, c.acceptErr
+}
+
+func TestCountingListener(t *testing.T) {
+	reg := prometheus.NewPedanticRegistry()
+	g := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+		Namespace: "test",
+		Name:      "gauge",
+	})
+
+	fake := &fakeListener{}
+	l := CountingListener(fake, g)
+	assert.Equal(t, float64(0), testutil.ToFloat64(g))
+
+	// Accepting connections should increment the gauge.
+	c1, err := l.Accept()
+	assert.NoError(t, err)
+	assert.Equal(t, float64(1), testutil.ToFloat64(g))
+	c2, err := l.Accept()
+	assert.NoError(t, err)
+	assert.Equal(t, float64(2), testutil.ToFloat64(g))
+
+	// Closing connections should decrement the gauge.
+	assert.NoError(t, c1.Close())
+	assert.Equal(t, float64(1), testutil.ToFloat64(g))
+	assert.NoError(t, c2.Close())
+	assert.Equal(t, float64(0), testutil.ToFloat64(g))
+
+	// Duplicate calls to Close should not decrement.
+	assert.NoError(t, c1.Close())
+	assert.Equal(t, float64(0), testutil.ToFloat64(g))
+
+	// Accept errors should not cause an increment.
+	fake.acceptErr = errors.New("accept")
+	_, err = l.Accept()
+	assert.Error(t, err)
+	assert.Equal(t, float64(0), testutil.ToFloat64(g))
+
+	// Close errors should still decrement.
+	fake.acceptErr = nil
+	fake.closeErr = errors.New("close")
+	c3, err := l.Accept()
+	assert.NoError(t, err)
+	assert.Equal(t, float64(1), testutil.ToFloat64(g))
+	assert.Error(t, c3.Close())
+	assert.Equal(t, float64(0), testutil.ToFloat64(g))
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/noop.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/noop.go
new file mode 100644
index 0000000000000000000000000000000000000000..723f2458c31cdc81b44f4972f62030cf66af6ed5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/noop.go
@@ -0,0 +1,25 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package monitoring
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// NoopRegisterer is a no-op Prometheus registerer.
+var NoopRegisterer prometheus.Registerer = noopRegisterer{}
+
+type noopRegisterer struct{}
+
+func (n noopRegisterer) Register(_ prometheus.Collector) error { return nil }
+
+func (n noopRegisterer) MustRegister(_ ...prometheus.Collector) {}
+
+func (n noopRegisterer) Unregister(_ prometheus.Collector) bool { return true }
diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/prometheus.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/prometheus.go
new file mode 100644
index 0000000000000000000000000000000000000000..530170a7a0efd13877576237e0eb217afec62470
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/prometheus.go
@@ -0,0 +1,876 @@
+//                           _       _
+// __      _____  __ ___   ___  __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
+//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+//  CONTACT: hello@weaviate.io
+//
+
+package monitoring
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+const (
+	DefaultMetricsNamespace = "weaviate"
+)
+
+type Config struct {
+	Enabled                    bool   `json:"enabled" yaml:"enabled" long:"enabled"`
+	Tool                       string `json:"tool" yaml:"tool"`
+	Port                       int    `json:"port" yaml:"port" long:"port" default:"8081"`
+	Group                      bool   `json:"group_classes" yaml:"group_classes"`
+	MonitorCriticalBucketsOnly bool   `json:"monitor_critical_buckets_only" yaml:"monitor_critical_buckets_only"`
+
+	// MetricsNamespace groups the metrics under a common prefix.
+	MetricsNamespace string `json:"metrics_namespace" yaml:"metrics_namespace" long:"metrics_namespace" default:""`
+}
+
+// NOTE: Do not add any new metrics to this global `PrometheusMetrics` struct.
+// Instead add your metrics close to the corresponding component.
+type PrometheusMetrics struct {
+	Registerer prometheus.Registerer
+
+	BatchTime                           *prometheus.HistogramVec
+	BatchSizeBytes                      *prometheus.SummaryVec
+	BatchSizeObjects                    prometheus.Summary
+	BatchSizeTenants                    prometheus.Summary
+	BatchDeleteTime                     *prometheus.SummaryVec
+	BatchCount                          *prometheus.CounterVec
+	BatchCountBytes                     *prometheus.CounterVec
+	ObjectsTime                         *prometheus.SummaryVec
+	LSMBloomFilters                     *prometheus.SummaryVec
+	AsyncOperations                     *prometheus.GaugeVec
+	LSMSegmentCount                     *prometheus.GaugeVec
+	LSMObjectsBucketSegmentCount        *prometheus.GaugeVec
+	LSMCompressedVecsBucketSegmentCount *prometheus.GaugeVec
+	LSMSegmentCountByLevel              *prometheus.GaugeVec
+	LSMSegmentUnloaded                  *prometheus.GaugeVec
+	LSMSegmentObjects                   *prometheus.GaugeVec
+	LSMSegmentSize                      *prometheus.GaugeVec
+	LSMMemtableSize                     *prometheus.GaugeVec
+	LSMMemtableDurations                *prometheus.SummaryVec
+	LSMBitmapBuffersUsage               *prometheus.CounterVec
+	ObjectCount                         *prometheus.GaugeVec
+	QueriesCount                        *prometheus.GaugeVec
+	RequestsTotal                       *prometheus.GaugeVec
+	QueriesDurations                    *prometheus.HistogramVec
+	QueriesFilteredVectorDurations      *prometheus.SummaryVec
+	QueryDimensions                     *prometheus.CounterVec
+	QueryDimensionsCombined             prometheus.Counter
+	GoroutinesCount                     *prometheus.GaugeVec
+	BackupRestoreDurations              *prometheus.SummaryVec
+	BackupStoreDurations                *prometheus.SummaryVec
+	BucketPauseDurations                *prometheus.SummaryVec
+	BackupRestoreClassDurations         *prometheus.SummaryVec
+	BackupRestoreBackupInitDurations    *prometheus.SummaryVec
+	BackupRestoreFromStorageDurations   *prometheus.SummaryVec
+	BackupRestoreDataTransferred        *prometheus.CounterVec
+	BackupStoreDataTransferred          *prometheus.CounterVec
+	FileIOWrites                        *prometheus.SummaryVec
+	FileIOReads                         *prometheus.SummaryVec
+	MmapOperations                      *prometheus.CounterVec
+	MmapProcMaps                        prometheus.Gauge
+
+	// queue metrics (presumably the tenant-offload/indexing queues — confirm against usage)
+	QueueSize                        *prometheus.GaugeVec
+	QueueDiskUsage                   *prometheus.GaugeVec
+	QueuePaused                      *prometheus.GaugeVec
+	QueueCount                       *prometheus.GaugeVec
+	QueuePartitionProcessingDuration *prometheus.HistogramVec
+
+	VectorIndexQueueInsertCount *prometheus.CounterVec
+	VectorIndexQueueDeleteCount *prometheus.CounterVec
+
+	VectorIndexTombstones              *prometheus.GaugeVec
+	VectorIndexTombstoneCleanupThreads *prometheus.GaugeVec
+	VectorIndexTombstoneCleanedCount   *prometheus.CounterVec
+	VectorIndexTombstoneUnexpected     *prometheus.CounterVec
+	VectorIndexTombstoneCycleStart     *prometheus.GaugeVec
+	VectorIndexTombstoneCycleEnd       *prometheus.GaugeVec
+	VectorIndexTombstoneCycleProgress  *prometheus.GaugeVec
+	VectorIndexOperations              *prometheus.GaugeVec
+	VectorIndexDurations               *prometheus.SummaryVec
+	VectorIndexSize                    *prometheus.GaugeVec
+	VectorIndexMaintenanceDurations    *prometheus.SummaryVec
+	VectorDimensionsSum                *prometheus.GaugeVec
+	VectorSegmentsSum                  *prometheus.GaugeVec
+
+	StartupProgress  *prometheus.GaugeVec
+	StartupDurations *prometheus.SummaryVec
+	StartupDiskIO    *prometheus.SummaryVec
+
+	ShardsLoaded    prometheus.Gauge
+	ShardsUnloaded  prometheus.Gauge
+	ShardsLoading   prometheus.Gauge
+	ShardsUnloading prometheus.Gauge
+
+	// RAFT-based schema metrics
+	SchemaWrites         *prometheus.SummaryVec
+	SchemaReadsLocal     *prometheus.SummaryVec
+	SchemaReadsLeader    *prometheus.SummaryVec
+	SchemaWaitForVersion *prometheus.SummaryVec
+
+	TombstoneFindLocalEntrypoint  *prometheus.CounterVec
+	TombstoneFindGlobalEntrypoint *prometheus.CounterVec
+	TombstoneReassignNeighbors    *prometheus.CounterVec
+	TombstoneDeleteListSize       *prometheus.GaugeVec
+
+	Group bool
+	// Keeping metering to only the critical buckets (objects, vectors_compressed)
+	// helps cut down on noise when monitoring
+	LSMCriticalBucketsOnly bool
+
+	// Deprecated metrics, keeping around because the classification features
+	// seem to still use the old logic. However, those metrics are not actually
+	// used for the schema anymore, but only for the classification features.
+	SchemaTxOpened   *prometheus.CounterVec
+	SchemaTxClosed   *prometheus.CounterVec
+	SchemaTxDuration *prometheus.SummaryVec
+
+	// Vectorization
+	T2VBatches            *prometheus.GaugeVec
+	T2VBatchQueueDuration *prometheus.HistogramVec
+	T2VRequestDuration    *prometheus.HistogramVec
+	T2VTokensInBatch      *prometheus.HistogramVec
+	T2VTokensInRequest    *prometheus.HistogramVec
+	T2VRateLimitStats     *prometheus.GaugeVec
+	T2VRepeatStats        *prometheus.GaugeVec
+	T2VRequestsPerBatch   *prometheus.HistogramVec
+
+	TokenizerDuration           *prometheus.HistogramVec
+	TokenizerRequests           *prometheus.CounterVec
+	TokenizerInitializeDuration *prometheus.HistogramVec
+	TokenCount                  *prometheus.CounterVec
+	TokenCountPerRequest        *prometheus.HistogramVec
+
+	// Currently targeted at OpenAI, the metrics will have to be added to every vectorizer for complete coverage
+	ModuleExternalRequests           *prometheus.CounterVec
+	ModuleExternalRequestDuration    *prometheus.HistogramVec
+	ModuleExternalBatchLength        *prometheus.HistogramVec
+	ModuleExternalRequestSingleCount *prometheus.CounterVec
+	ModuleExternalRequestBatchCount  *prometheus.CounterVec
+	ModuleExternalRequestSize        *prometheus.HistogramVec
+	ModuleExternalResponseSize       *prometheus.HistogramVec
+	ModuleExternalResponseStatus     *prometheus.CounterVec
+	VectorizerRequestTokens          *prometheus.HistogramVec
+	ModuleExternalError              *prometheus.CounterVec
+	ModuleCallError                  *prometheus.CounterVec
+	ModuleBatchError                 *prometheus.CounterVec
+
+	// Checksum metrics
+	ChecksumValidationDuration prometheus.Summary
+	ChecksumBytesRead          prometheus.Summary
+}
+
+func NewTenantOffloadMetrics(cfg Config, reg prometheus.Registerer) *TenantOffloadMetrics {
+	r := promauto.With(reg)
+	return &TenantOffloadMetrics{
+		FetchedBytes: r.NewCounter(prometheus.CounterOpts{
+			Namespace: cfg.MetricsNamespace,
+			Name:      "tenant_offload_fetched_bytes_total",
+		}),
+		TransferredBytes: r.NewCounter(prometheus.CounterOpts{
+			Namespace: cfg.MetricsNamespace,
+			Name:      "tenant_offload_transferred_bytes_total",
+		}),
+		OpsDuration: r.NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: cfg.MetricsNamespace,
+			Name:      "tenant_offload_operation_duration_seconds",
+			Buckets:   LatencyBuckets,
+		}, []string{"operation", "status"}), // status can be "success" or "failure"
+	}
+}
+
+type TenantOffloadMetrics struct {
+	// NOTE: These ops are not GET or PUT requests to object storage.
+	// these are one of the `download`, `upload` or `delete`. Because we use s5cmd to talk
+	// to object storage currently. Which supports these operations at high level.
+	FetchedBytes     prometheus.Counter
+	TransferredBytes prometheus.Counter
+	OpsDuration      *prometheus.HistogramVec
+}
+
+// NewHTTPServerMetrics returns the HTTPServerMetrics used to instrument http servers.
+func NewHTTPServerMetrics(namespace string, reg prometheus.Registerer) *HTTPServerMetrics {
+	r := promauto.With(reg)
+
+	return &HTTPServerMetrics{
+		RequestDuration: r.NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: namespace,
+			Name:      "http_request_duration_seconds",
+			Help:      "Time (in seconds) spent serving requests.",
+			Buckets:   LatencyBuckets,
+		}, []string{"method", "route", "status_code"}),
+		RequestBodySize: r.NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: namespace,
+			Name:      "http_request_size_bytes",
+			Help:      "Size (in bytes) of the request received.",
+			Buckets:   sizeBuckets,
+		}, []string{"method", "route"}),
+		ResponseBodySize: r.NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: namespace,
+			Name:      "http_response_size_bytes",
+			Help:      "Size (in bytes) of the response sent.",
+			Buckets:   sizeBuckets,
+		}, []string{"method", "route"}),
+		InflightRequests: r.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "http_requests_inflight",
+			Help:      "Current number of inflight requests.",
+		}, []string{"method", "route"}),
+	}
+}
+
+// HTTPServerMetrics exposes set of prometheus metrics for http servers.
+type HTTPServerMetrics struct { + TCPActiveConnections *prometheus.GaugeVec + RequestDuration *prometheus.HistogramVec + RequestBodySize *prometheus.HistogramVec + ResponseBodySize *prometheus.HistogramVec + InflightRequests *prometheus.GaugeVec +} + +// GRPCServerMetrics exposes set of prometheus metrics for grpc servers. +type GRPCServerMetrics struct { + RequestDuration *prometheus.HistogramVec + RequestBodySize *prometheus.HistogramVec + ResponseBodySize *prometheus.HistogramVec + InflightRequests *prometheus.GaugeVec +} + +func NewGRPCServerMetrics(namespace string, reg prometheus.Registerer) *GRPCServerMetrics { + r := promauto.With(reg) + return &GRPCServerMetrics{ + RequestDuration: r.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "grpc_server_request_duration_seconds", + Help: "Time (in seconds) spent serving requests.", + Buckets: LatencyBuckets, + }, []string{"grpc_service", "method", "status"}), + RequestBodySize: r.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "grpc_server_request_size_bytes", + Help: "Size (in bytes) of the request received.", + Buckets: sizeBuckets, + }, []string{"grpc_service", "method"}), + ResponseBodySize: r.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "grpc_server_response_size_bytes", + Help: "Size (in bytes) of the response sent.", + Buckets: sizeBuckets, + }, []string{"grpc_service", "method"}), + InflightRequests: r.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "grpc_server_requests_inflight", + Help: "Current number of inflight requests.", + }, []string{"grpc_service", "method"}), + } +} + +// Delete Shard deletes existing label combinations that match both +// the shard and class name. If a metric is not collected at the shard +// level it is unaffected. This is to make sure that deleting a single +// shard (e.g. multi-tenancy) does not affect metrics for existing +// shards. 
+// +// In addition, there are some metrics that we explicitly keep, such +// as vector_dimensions_sum as they can be used in billing decisions. +func (pm *PrometheusMetrics) DeleteShard(className, shardName string) error { + if pm == nil { + return nil + } + + labels := prometheus.Labels{ + "class_name": className, + "shard_name": shardName, + } + pm.BatchTime.DeletePartialMatch(labels) + pm.BatchDeleteTime.DeletePartialMatch(labels) + pm.ObjectsTime.DeletePartialMatch(labels) + pm.ObjectCount.DeletePartialMatch(labels) + pm.QueriesFilteredVectorDurations.DeletePartialMatch(labels) + pm.AsyncOperations.DeletePartialMatch(labels) + pm.LSMBloomFilters.DeletePartialMatch(labels) + pm.LSMMemtableDurations.DeletePartialMatch(labels) + pm.LSMMemtableSize.DeletePartialMatch(labels) + pm.LSMMemtableDurations.DeletePartialMatch(labels) + pm.LSMSegmentCount.DeletePartialMatch(labels) + pm.LSMSegmentSize.DeletePartialMatch(labels) + pm.LSMSegmentCountByLevel.DeletePartialMatch(labels) + pm.QueueSize.DeletePartialMatch(labels) + pm.QueueDiskUsage.DeletePartialMatch(labels) + pm.QueuePaused.DeletePartialMatch(labels) + pm.QueueCount.DeletePartialMatch(labels) + pm.QueuePartitionProcessingDuration.DeletePartialMatch(labels) + pm.VectorIndexQueueInsertCount.DeletePartialMatch(labels) + pm.VectorIndexQueueDeleteCount.DeletePartialMatch(labels) + pm.VectorIndexTombstones.DeletePartialMatch(labels) + pm.VectorIndexTombstoneCleanupThreads.DeletePartialMatch(labels) + pm.VectorIndexTombstoneCleanedCount.DeletePartialMatch(labels) + pm.VectorIndexTombstoneUnexpected.DeletePartialMatch(labels) + pm.VectorIndexTombstoneCycleStart.DeletePartialMatch(labels) + pm.VectorIndexTombstoneCycleEnd.DeletePartialMatch(labels) + pm.VectorIndexTombstoneCycleProgress.DeletePartialMatch(labels) + pm.VectorIndexOperations.DeletePartialMatch(labels) + pm.VectorIndexMaintenanceDurations.DeletePartialMatch(labels) + pm.VectorIndexDurations.DeletePartialMatch(labels) + 
pm.VectorIndexSize.DeletePartialMatch(labels) + pm.StartupProgress.DeletePartialMatch(labels) + pm.StartupDurations.DeletePartialMatch(labels) + pm.StartupDiskIO.DeletePartialMatch(labels) + return nil +} + +// DeleteClass deletes all metrics that match the class name, but do +// not have a shard-specific label. See [DeleteShard] for more +// information. +func (pm *PrometheusMetrics) DeleteClass(className string) error { + if pm == nil { + return nil + } + + labels := prometheus.Labels{ + "class_name": className, + } + pm.QueriesCount.DeletePartialMatch(labels) + pm.QueriesDurations.DeletePartialMatch(labels) + pm.GoroutinesCount.DeletePartialMatch(labels) + pm.BackupRestoreClassDurations.DeletePartialMatch(labels) + pm.BackupRestoreBackupInitDurations.DeletePartialMatch(labels) + pm.BackupRestoreFromStorageDurations.DeletePartialMatch(labels) + pm.BackupStoreDurations.DeletePartialMatch(labels) + pm.BackupRestoreDataTransferred.DeletePartialMatch(labels) + pm.BackupStoreDataTransferred.DeletePartialMatch(labels) + pm.QueriesFilteredVectorDurations.DeletePartialMatch(labels) + + return nil +} + +const mb = 1024 * 1024 + +var ( + // msBuckets and sBuckets are deprecated. Use `LatencyBuckets` and `sizeBuckets` instead. + msBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 60000, 300000} + sBuckets = []float64{0.01, 0.1, 1, 10, 20, 30, 60, 120, 180, 500} + + // LatencyBuckets is default histogram bucket for response time (in seconds). + // It also includes request that served *very* fast and *very* slow + LatencyBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100} + + // sizeBuckets defines buckets for request/response body sizes (in bytes). + // TODO(kavi): Check with real data once deployed on prod and tweak accordingly. 
+ sizeBuckets = []float64{1 * mb, 2.5 * mb, 5 * mb, 10 * mb, 25 * mb, 50 * mb, 100 * mb, 250 * mb} + + metrics *PrometheusMetrics = nil +) + +func init() { + metrics = newPrometheusMetrics() +} + +func InitConfig(cfg Config) { + metrics.Group = cfg.Group + metrics.LSMCriticalBucketsOnly = cfg.MonitorCriticalBucketsOnly +} + +func GetMetrics() *PrometheusMetrics { + return metrics +} + +func newPrometheusMetrics() *PrometheusMetrics { + return &PrometheusMetrics{ + Registerer: prometheus.DefaultRegisterer, + BatchTime: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "batch_durations_ms", + Help: "Duration in ms of a single batch", + Buckets: msBuckets, + }, []string{"operation", "class_name", "shard_name"}), + BatchSizeBytes: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "batch_size_bytes", + Help: "Size of a raw batch request batch in bytes", + }, []string{"api"}), + BatchSizeObjects: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "batch_size_objects", + Help: "Number of objects in a batch", + }), + BatchSizeTenants: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "batch_size_tenants", + Help: "Number of unique tenants referenced in a batch", + }), + + BatchDeleteTime: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "batch_delete_durations_ms", + Help: "Duration in ms of a single delete batch", + }, []string{"operation", "class_name", "shard_name"}), + + BatchCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "batch_objects_processed_total", + Help: "Number of objects processed in a batch", + }, []string{"class_name", "shard_name"}), + + BatchCountBytes: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "batch_objects_processed_bytes", + Help: "Number of bytes processed in a batch", + }, []string{"class_name", "shard_name"}), + + ObjectsTime: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "objects_durations_ms", + Help: "Duration of an individual object operation. 
Also as part of batches.", + }, []string{"operation", "step", "class_name", "shard_name"}), + ObjectCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "object_count", + Help: "Number of currently ongoing async operations", + }, []string{"class_name", "shard_name"}), + + QueriesCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "concurrent_queries_count", + Help: "Number of concurrently running query operations", + }, []string{"class_name", "query_type"}), + + RequestsTotal: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "requests_total", + Help: "Number of all requests made", + }, []string{"status", "class_name", "api", "query_type"}), + + QueriesDurations: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "queries_durations_ms", + Help: "Duration of queries in milliseconds", + Buckets: msBuckets, + }, []string{"class_name", "query_type"}), + + QueriesFilteredVectorDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "queries_filtered_vector_durations_ms", + Help: "Duration of queries in milliseconds", + }, []string{"class_name", "shard_name", "operation"}), + + GoroutinesCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "concurrent_goroutines", + Help: "Number of concurrently running goroutines", + }, []string{"class_name", "query_type"}), + + AsyncOperations: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "async_operations_running", + Help: "Number of currently ongoing async operations", + }, []string{"operation", "class_name", "shard_name", "path"}), + + // LSM metrics + LSMSegmentCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_active_segments", + Help: "Number of currently present segments per shard", + }, []string{"strategy", "class_name", "shard_name", "path"}), + LSMObjectsBucketSegmentCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_objects_bucket_segment_count", + Help: "Number of segments per shard in the objects bucket", + }, []string{"strategy", "class_name", "shard_name", 
"path"}), + LSMCompressedVecsBucketSegmentCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_compressed_vecs_bucket_segment_count", + Help: "Number of segments per shard in the vectors_compressed bucket", + }, []string{"strategy", "class_name", "shard_name", "path"}), + LSMBloomFilters: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "lsm_bloom_filters_duration_ms", + Help: "Duration of bloom filter operations", + }, []string{"operation", "strategy", "class_name", "shard_name"}), + LSMSegmentObjects: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_segment_objects", + Help: "Number of objects/entries of segment by level", + }, []string{"strategy", "class_name", "shard_name", "path", "level"}), + LSMSegmentSize: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_segment_size", + Help: "Size of segment by level and unit", + }, []string{"strategy", "class_name", "shard_name", "path", "level", "unit"}), + LSMSegmentCountByLevel: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_segment_count", + Help: "Number of segments by level", + }, []string{"strategy", "class_name", "shard_name", "path", "level"}), + LSMSegmentUnloaded: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_segment_unloaded", + Help: "Number of unloaded segments", + }, []string{"strategy", "class_name", "shard_name", "path"}), + LSMMemtableSize: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "lsm_memtable_size", + Help: "Size of memtable by path", + }, []string{"strategy", "class_name", "shard_name", "path"}), + LSMMemtableDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "lsm_memtable_durations_ms", + Help: "Time in ms for a bucket operation to complete", + }, []string{"strategy", "class_name", "shard_name", "path", "operation"}), + LSMBitmapBuffersUsage: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "lsm_bitmap_buffers_usage", + Help: "Number of bitmap buffers used by size", + }, []string{"size", "operation"}), + FileIOWrites: 
promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "file_io_writes_total_bytes", + Help: "Total number of bytes written to disk", + }, []string{"operation", "strategy"}), + FileIOReads: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "file_io_reads_total_bytes", + Help: "Total number of bytes read from disk", + }, []string{"operation"}), + MmapOperations: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mmap_operations_total", + Help: "Total number of mmap operations", + }, []string{"operation", "strategy"}), + MmapProcMaps: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "mmap_proc_maps", + Help: "Number of entries in /proc/self/maps", + }), + + // Queue metrics + QueueSize: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "queue_size", + Help: "Number of records in the queue", + }, []string{"class_name", "shard_name"}), + QueueDiskUsage: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "queue_disk_usage", + Help: "Disk usage of the queue", + }, []string{"class_name", "shard_name"}), + QueuePaused: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "queue_paused", + Help: "Whether the queue is paused", + }, []string{"class_name", "shard_name"}), + QueueCount: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "queue_count", + Help: "Number of queues", + }, []string{"class_name", "shard_name"}), + QueuePartitionProcessingDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "queue_partition_processing_duration_ms", + Help: "Duration in ms of a single partition processing", + }, []string{"class_name", "shard_name"}), + + // Async indexing metrics + VectorIndexQueueInsertCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vector_index_queue_insert_count", + Help: "Number of insert operations added to the vector index queue", + }, []string{"class_name", "shard_name", "target_vector"}), + VectorIndexQueueDeleteCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vector_index_queue_delete_count", + Help: "Number 
of delete operations added to the vector index queue", + }, []string{"class_name", "shard_name", "target_vector"}), + + // Vector index metrics + VectorIndexTombstones: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_tombstones", + Help: "Number of active vector index tombstones", + }, []string{"class_name", "shard_name"}), + VectorIndexTombstoneCleanupThreads: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_tombstone_cleanup_threads", + Help: "Number of threads in use to clean up tombstones", + }, []string{"class_name", "shard_name"}), + VectorIndexTombstoneCleanedCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vector_index_tombstone_cleaned", + Help: "Total number of deleted objects that have been cleaned up", + }, []string{"class_name", "shard_name"}), + VectorIndexTombstoneUnexpected: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vector_index_tombstone_unexpected_total", + Help: "Total number of unexpected tombstones that were found, for example because a vector was not found for an existing id in the index", + }, []string{"class_name", "shard_name", "operation"}), + VectorIndexTombstoneCycleStart: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_tombstone_cycle_start_timestamp_seconds", + Help: "Unix epoch timestamp of the start of the current tombstone cleanup cycle", + }, []string{"class_name", "shard_name"}), + VectorIndexTombstoneCycleEnd: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_tombstone_cycle_end_timestamp_seconds", + Help: "Unix epoch timestamp of the end of the last tombstone cleanup cycle. A negative value indicates that the cycle is still running", + }, []string{"class_name", "shard_name"}), + VectorIndexTombstoneCycleProgress: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_tombstone_cycle_progress", + Help: "A ratio (percentage) of the progress of the current tombstone cleanup cycle. 
0 indicates the very beginning, 1 is a complete cycle.", + }, []string{"class_name", "shard_name"}), + VectorIndexOperations: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_operations", + Help: "Total number of mutating operations on the vector index", + }, []string{"operation", "class_name", "shard_name"}), + VectorIndexSize: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_index_size", + Help: "The size of the vector index. Typically larger than number of vectors, as it grows proactively.", + }, []string{"class_name", "shard_name"}), + VectorIndexMaintenanceDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "vector_index_maintenance_durations_ms", + Help: "Duration of a sync or async vector index maintenance operation", + }, []string{"operation", "class_name", "shard_name"}), + VectorIndexDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "vector_index_durations_ms", + Help: "Duration of typical vector index operations (insert, delete)", + }, []string{"operation", "step", "class_name", "shard_name"}), + VectorDimensionsSum: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_dimensions_sum", + Help: "Total dimensions in a shard", + }, []string{"class_name", "shard_name"}), + VectorSegmentsSum: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vector_segments_sum", + Help: "Total segments in a shard if quantization enabled", + }, []string{"class_name", "shard_name"}), + + // Startup metrics + StartupProgress: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "startup_progress", + Help: "A ratio (percentage) of startup progress for a particular component in a shard", + }, []string{"operation", "class_name", "shard_name"}), + StartupDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "startup_durations_ms", + Help: "Duration of individual startup operations in ms", + }, []string{"operation", "class_name", "shard_name"}), + StartupDiskIO: promauto.NewSummaryVec(prometheus.SummaryOpts{ + 
Name: "startup_diskio_throughput", + Help: "Disk I/O throuhput in bytes per second", + }, []string{"operation", "class_name", "shard_name"}), + QueryDimensions: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "query_dimensions_total", + Help: "The vector dimensions used by any read-query that involves vectors", + }, []string{"query_type", "operation", "class_name"}), + QueryDimensionsCombined: promauto.NewCounter(prometheus.CounterOpts{ + Name: "query_dimensions_combined_total", + Help: "The vector dimensions used by any read-query that involves vectors, aggregated across all classes and shards. The sum of all labels for query_dimensions_total should always match this labelless metric", + }), + + // Backup/restore metrics + BackupRestoreDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "backup_restore_ms", + Help: "Duration of a backup restore", + }, []string{"backend_name", "class_name"}), + BackupRestoreClassDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "backup_restore_class_ms", + Help: "Duration restoring class", + }, []string{"class_name"}), + BackupRestoreBackupInitDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "backup_restore_init_ms", + Help: "startup phase of a backup restore", + }, []string{"backend_name", "class_name"}), + BackupRestoreFromStorageDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "backup_restore_from_backend_ms", + Help: "file transfer stage of a backup restore", + }, []string{"backend_name", "class_name"}), + BackupStoreDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "backup_store_to_backend_ms", + Help: "file transfer stage of a backup restore", + }, []string{"backend_name", "class_name"}), + BucketPauseDurations: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "bucket_pause_durations_ms", + Help: "bucket pause durations", + }, []string{"bucket_dir"}), + BackupRestoreDataTransferred: promauto.NewCounterVec(prometheus.CounterOpts{ + 
Name: "backup_restore_data_transferred", + Help: "Total number of bytes transferred during a backup restore", + }, []string{"backend_name", "class_name"}), + BackupStoreDataTransferred: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "backup_store_data_transferred", + Help: "Total number of bytes transferred during a backup store", + }, []string{"backend_name", "class_name"}), + + // Shard metrics + ShardsLoaded: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "shards_loaded", + Help: "Number of shards loaded", + }), + ShardsUnloaded: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "shards_unloaded", + Help: "Number of shards on not loaded", + }), + ShardsLoading: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "shards_loading", + Help: "Number of shards in process of loading", + }), + ShardsUnloading: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "shards_unloading", + Help: "Number of shards in process of unloading", + }), + + // Schema TX-metrics. Can be removed when RAFT is ready + SchemaTxOpened: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "schema_tx_opened_total", + Help: "Total number of opened schema transactions", + }, []string{"ownership"}), + SchemaTxClosed: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "schema_tx_closed_total", + Help: "Total number of closed schema transactions. 
A close must be either successful or failed", + }, []string{"ownership", "status"}), + SchemaTxDuration: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "schema_tx_duration_seconds", + Help: "Mean duration of a tx by status", + }, []string{"ownership", "status"}), + + // RAFT-based schema metrics + SchemaWrites: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "schema_writes_seconds", + Help: "Duration of schema writes (which always involve the leader)", + }, []string{"type"}), + SchemaReadsLocal: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "schema_reads_local_seconds", + Help: "Duration of local schema reads that do not involve the leader", + }, []string{"type"}), + SchemaReadsLeader: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "schema_reads_leader_seconds", + Help: "Duration of schema reads that are passed to the leader", + }, []string{"type"}), + SchemaWaitForVersion: promauto.NewSummaryVec(prometheus.SummaryOpts{ + Name: "schema_wait_for_version_seconds", + Help: "Duration of waiting for a schema version to be reached", + }, []string{"type"}), + + TombstoneFindLocalEntrypoint: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tombstone_find_local_entrypoint", + Help: "Total number of tombstone delete local entrypoint calls", + }, []string{"class_name", "shard_name"}), + TombstoneFindGlobalEntrypoint: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tombstone_find_global_entrypoint", + Help: "Total number of tombstone delete global entrypoint calls", + }, []string{"class_name", "shard_name"}), + TombstoneReassignNeighbors: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tombstone_reassign_neighbors", + Help: "Total number of tombstone reassign neighbor calls", + }, []string{"class_name", "shard_name"}), + TombstoneDeleteListSize: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "tombstone_delete_list_size", + Help: "Delete list size of tombstones", + }, []string{"class_name", "shard_name"}), + + 
T2VBatches: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "t2v_concurrent_batches", + Help: "Number of batches currently running", + }, []string{"vectorizer"}), + T2VBatchQueueDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "t2v_batch_queue_duration_seconds", + Help: "Time of a batch spend in specific portions of the queue", + Buckets: sBuckets, + }, []string{"vectorizer", "operation"}), + T2VRequestDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "t2v_request_duration_seconds", + Help: "Duration of an individual request to the vectorizer", + Buckets: sBuckets, + }, []string{"vectorizer"}), + T2VTokensInBatch: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "t2v_tokens_in_batch", + Help: "Number of tokens in a user-defined batch", + Buckets: []float64{1, 10, 100, 1000, 10000, 100000, 1000000}, + }, []string{"vectorizer"}), + T2VTokensInRequest: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "t2v_tokens_in_request", + Help: "Number of tokens in an individual request sent to the vectorizer", + Buckets: []float64{1, 10, 100, 1000, 10000, 100000, 1000000}, + }, []string{"vectorizer"}), + T2VRateLimitStats: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "t2v_rate_limit_stats", + Help: "Rate limit stats for the vectorizer", + }, []string{"vectorizer", "stat"}), + T2VRepeatStats: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "t2v_repeat_stats", + Help: "Why batch scheduling is repeated", + }, []string{"vectorizer", "stat"}), + T2VRequestsPerBatch: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "t2v_requests_per_batch", + Help: "Number of requests required to process an entire (user) batch", + Buckets: []float64{1, 2, 5, 10, 100, 1000}, + }, []string{"vectorizer"}), + TokenizerDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "tokenizer_duration_seconds", + Help: "Duration of a tokenizer operation", + Buckets: LatencyBuckets, + }, []string{"tokenizer"}), + 
TokenizerRequests: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tokenizer_requests_total", + Help: "Number of tokenizer requests", + }, []string{"tokenizer"}), + TokenizerInitializeDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "tokenizer_initialize_duration_seconds", + Help: "Duration of a tokenizer initialization operation", + Buckets: []float64{0.05, 0.1, 0.5, 1, 2, 5, 10}, + }, []string{"tokenizer"}), + TokenCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "token_count_total", + Help: "Number of tokens processed", + }, []string{"tokenizer"}), + TokenCountPerRequest: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "token_count_per_request", + Help: "Number of tokens processed per request", + Buckets: []float64{1, 10, 50, 100, 500, 1000, 10000, 100000, 1000000}, + }, []string{"tokenizer"}), + ModuleExternalRequests: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_requests_total", + Help: "Number of module requests to external APIs", + }, []string{"op", "api"}), + ModuleExternalRequestDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_module_request_duration_seconds", + Help: "Duration of an individual request to a module external API", + Buckets: LatencyBuckets, + }, []string{"op", "api"}), + ModuleExternalBatchLength: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_module_requests_per_batch", + Help: "Number of items in a batch", + Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608}, + }, []string{"op", "api"}), + ModuleExternalRequestSize: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_module_request_size_bytes", + Help: "Size (in bytes) of the request sent to an external API", + Buckets: []float64{256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 
8388608}, + }, []string{"op", "api"}), + ModuleExternalResponseSize: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_module_response_size_bytes", + Help: "Size (in bytes) of the response received from an external API", + Buckets: []float64{256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608}, + }, []string{"op", "api"}), + VectorizerRequestTokens: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "weaviate_vectorizer_request_tokens", + Help: "Number of tokens in the request sent to an external vectorizer", + Buckets: []float64{0, 1, 10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000}, + }, []string{"inout", "api"}), + ModuleExternalRequestSingleCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_request_single_count", + Help: "Number of single-item external API requests", + }, []string{"op", "api"}), + ModuleExternalRequestBatchCount: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_request_batch_count", + Help: "Number of batched module requests", + }, []string{"op", "api"}), + ModuleExternalError: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_error_total", + Help: "Number of OpenAI errors", + }, []string{"op", "module", "endpoint", "status_code"}), + ModuleCallError: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_call_error_total", + Help: "Number of module errors (related to external calls)", + }, []string{"module", "endpoint", "status_code"}), + ModuleExternalResponseStatus: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_response_status_total", + Help: "Number of API response statuses", + }, []string{"op", "endpoint", "status"}), + ModuleBatchError: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "weaviate_module_batch_error_total", + Help: "Number of batch errors", + }, []string{"operation", "class_name"}), + + // Checksum metrics + 
ChecksumValidationDuration: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "checksum_validation_duration_seconds", + Help: "Duration of checksum validation", + }), + ChecksumBytesRead: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "checksum_bytes_read", + Help: "Number of bytes read during checksum validation", + }), + } +} + +type OnceUponATimer struct { + sync.Once + Timer *prometheus.Timer +} + +func NewOnceTimer(promTimer *prometheus.Timer) *OnceUponATimer { + o := OnceUponATimer{} + o.Timer = promTimer + return &o +} + +func (o *OnceUponATimer) ObserveDurationOnce() { + o.Do(func() { + o.Timer.ObserveDuration() + }) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards.go new file mode 100644 index 0000000000000000000000000000000000000000..ee60ae8b98189e8d88a80dec4bf2ac73ca15e0f3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards.go @@ -0,0 +1,61 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package monitoring + +// Move the shard from unloaded to in progress +func (pm *PrometheusMetrics) StartLoadingShard() { + if pm == nil { + return + } + + pm.ShardsUnloaded.Dec() + pm.ShardsLoading.Inc() +} + +// Move the shard from in progress to loaded +func (pm *PrometheusMetrics) FinishLoadingShard() { + if pm == nil { + return + } + + pm.ShardsLoading.Dec() + pm.ShardsLoaded.Inc() +} + +// Move the shard from loaded to in progress +func (pm *PrometheusMetrics) StartUnloadingShard() { + if pm == nil { + return + } + + pm.ShardsLoaded.Dec() + pm.ShardsUnloading.Inc() +} + +// Move the shard from in progress to unloaded +func (pm *PrometheusMetrics) FinishUnloadingShard() { + if pm == nil { + return + } + + pm.ShardsUnloading.Dec() + pm.ShardsUnloaded.Inc() +} + +// Register a new, unloaded shard +func (pm *PrometheusMetrics) NewUnloadedshard() { + if pm == nil { + return + } + + pm.ShardsUnloaded.Inc() +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards_test.go b/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d67725671afdc85ca46d7e1e90994cc70bdebe6f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/monitoring/shards_test.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package monitoring + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" +) + +func TestShards(t *testing.T) { + m := GetMetrics() + + t.Run("start_loading_shard", func(t *testing.T) { + // Setting base values + mv := m.ShardsLoading + mv.Set(1) + + mv = m.ShardsUnloaded + mv.Set(1) + + m.StartLoadingShard() + + loadingCount := testutil.ToFloat64(m.ShardsLoading) + unloadedCount := testutil.ToFloat64(m.ShardsUnloaded) + + assert.Equal(t, float64(2), loadingCount) + assert.Equal(t, float64(0), unloadedCount) + }) + + t.Run("finish_loading_shard", func(t *testing.T) { + // invariant: + // 1. `shards_loading` should be decremented + // 2. `shards_loaded` should be incremented + + // Setting base values + mv := m.ShardsLoading + mv.Set(1) + + mv = m.ShardsLoaded + mv.Set(1) + + m.FinishLoadingShard() + + loadingCount := testutil.ToFloat64(m.ShardsLoading) + loadedCount := testutil.ToFloat64(m.ShardsLoaded) + + assert.Equal(t, float64(0), loadingCount) // dec + assert.Equal(t, float64(2), loadedCount) // inc + }) + + t.Run("start_unloading_shard", func(t *testing.T) { + // invariant: + // 1. `shards_loaded` should be decremented + // 2. `shards_unloading` should be incremented + + // Setting base values + mv := m.ShardsLoaded + mv.Set(1) + + mv = m.ShardsUnloading + mv.Set(1) + + m.StartUnloadingShard() + + loadedCount := testutil.ToFloat64(m.ShardsLoaded) + unloadingCount := testutil.ToFloat64(m.ShardsUnloading) + + assert.Equal(t, float64(0), loadedCount) // dec + assert.Equal(t, float64(2), unloadingCount) // inc + }) + + t.Run("finish_unloading_shard", func(t *testing.T) { + // invariant: + // 1. `shards_unloading` should be decremented + // 2. 
`shards_unloaded` should be incremented + + // Setting base values + mv := m.ShardsUnloading + mv.Set(1) + + mv = m.ShardsUnloaded + mv.Set(1) + + m.FinishUnloadingShard() + + unloadingCount := testutil.ToFloat64(m.ShardsUnloading) + unloadedCount := testutil.ToFloat64(m.ShardsUnloaded) + + assert.Equal(t, float64(0), unloadingCount) // dec + assert.Equal(t, float64(2), unloadedCount) // inc + }) + + t.Run("new_unloaded_shard", func(t *testing.T) { + // invariant: + // 1. `shards_unloaded` should be incremented + + // Setting base values + mv := m.ShardsUnloaded + mv.Set(1) + + m.NewUnloadedshard() + + unloadedCount := testutil.ToFloat64(m.ShardsUnloaded) + + assert.Equal(t, float64(2), unloadedCount) // inc + }) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions.go b/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions.go new file mode 100644 index 0000000000000000000000000000000000000000..49541b8776642cc361dc4c9918a593f9a986baa8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions.go @@ -0,0 +1,19 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package multitenancy + +import "github.com/weaviate/weaviate/entities/models" + +// IsMultiTenant returns true if a collection is multi-tenant, false otherwise +func IsMultiTenant(config *models.MultiTenancyConfig) bool { + return config != nil && config.Enabled +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions_test.go b/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f18ac02837fdb93702ea2df861249a6e3e88033f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/multitenancy/extensions_test.go @@ -0,0 +1,66 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package multitenancy_test + +import ( + "testing" + + "github.com/weaviate/weaviate/usecases/multitenancy" + + "github.com/weaviate/weaviate/entities/models" +) + +func TestIsMultiTenant(t *testing.T) { + tests := []struct { + name string + config *models.MultiTenancyConfig + expected bool + }{ + { + name: "nil config should return false", + config: nil, + expected: false, + }, + { + name: "disabled multi-tenancy should return false", + config: &models.MultiTenancyConfig{ + Enabled: false, + }, + expected: false, + }, + { + name: "enabled multi-tenancy should return true", + config: &models.MultiTenancyConfig{ + Enabled: true, + }, + expected: true, + }, + { + name: "enabled with other fields should return true", + config: &models.MultiTenancyConfig{ + Enabled: true, + AutoTenantCreation: true, + AutoTenantActivation: false, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := multitenancy.IsMultiTenant(tt.config) + if result != 
tt.expected { + t.Errorf("IsMultiTenant() = %v, expected %v", result, tt.expected) + } + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/nodes/handler.go b/platform/dbops/binaries/weaviate-src/usecases/nodes/handler.go new file mode 100644 index 0000000000000000000000000000000000000000..be63348f0ec9e39be4b10a07d0d7d7a826380daa --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/nodes/handler.go @@ -0,0 +1,101 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package nodes + +import ( + "context" + "time" + + "github.com/weaviate/weaviate/usecases/auth/authorization/filter" + "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf" + + "github.com/weaviate/weaviate/entities/verbosity" + + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + schemaUC "github.com/weaviate/weaviate/usecases/schema" +) + +type db interface { + GetNodeStatus(ctx context.Context, className, shardName, verbosity string) ([]*models.NodeStatus, error) + GetNodeStatistics(ctx context.Context) ([]*models.Statistics, error) +} + +type Manager struct { + logger logrus.FieldLogger + authorizer authorization.Authorizer + db db + schemaManager *schemaUC.Manager + rbacconfig rbacconf.Config + minimumInternalTimeout time.Duration +} + +func NewManager(logger logrus.FieldLogger, authorizer authorization.Authorizer, + db db, schemaManager *schemaUC.Manager, rbacconfig rbacconf.Config, minimumInternalTimeout time.Duration, +) *Manager { + return &Manager{logger, authorizer, db, schemaManager, rbacconfig, minimumInternalTimeout} +} + +// GetNodeStatus aggregates the status across all nodes. 
It will try for a +// maximum of the configured timeout, then mark nodes as timed out. +func (m *Manager) GetNodeStatus(ctx context.Context, + principal *models.Principal, className, shardName, verbosityString string, +) ([]*models.NodeStatus, error) { + ctxWithTimeout, cancel := context.WithTimeout(ctx, m.minimumInternalTimeout) + defer cancel() + + // filter output after getting results if info about all shards is requested + filterOutput := verbosityString == verbosity.OutputVerbose && className == "" && m.rbacconfig.Enabled + + if !filterOutput { + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Nodes(verbosityString, className)...); err != nil { + return nil, err + } + } + + status, err := m.db.GetNodeStatus(ctxWithTimeout, className, shardName, verbosityString) + if err != nil { + return nil, err + } + + if filterOutput { + resourceFilter := filter.New[*models.NodeShardStatus](m.authorizer, m.rbacconfig) + + for i, nodeS := range status { + status[i].Shards = resourceFilter.Filter( + ctx, + m.logger, + principal, + nodeS.Shards, + authorization.READ, + func(shard *models.NodeShardStatus) string { + return authorization.Nodes(verbosityString, shard.Class)[0] + }, + ) + } + } + + return status, nil +} + +func (m *Manager) GetNodeStatistics(ctx context.Context, + principal *models.Principal, +) ([]*models.Statistics, error) { + ctxWithTimeout, cancel := context.WithTimeout(ctx, m.minimumInternalTimeout) + defer cancel() + + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Cluster()); err != nil { + return nil, err + } + return m.db.GetNodeStatistics(ctxWithTimeout) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/add.go b/platform/dbops/binaries/weaviate-src/usecases/objects/add.go new file mode 100644 index 0000000000000000000000000000000000000000..964845ac66cb542e2210a80a8dacab50d8d8d7af --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/add.go @@ 
-0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/dto" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzerrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +// AddObject Class Instance to the connected DB. 
+func (m *Manager) AddObject(ctx context.Context, principal *models.Principal, object *models.Object, + repl *additional.ReplicationProperties, +) (*models.Object, error) { + className := schema.UppercaseClassName(object.Class) + className, _ = m.resolveAlias(className) + object.Class = className + + if err := m.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.ShardsData(className, object.Tenant)...); err != nil { + return nil, err + } + + m.metrics.AddObjectInc() + defer m.metrics.AddObjectDec() + + ctx = classcache.ContextWithClassCache(ctx) + // we don't reveal any info that the end users cannot get through the structure of the data anyway + fetchedClasses, err := m.schemaManager.GetCachedClassNoAuth(ctx, className) + if err != nil { + return nil, err + } + + if err := m.allocChecker.CheckAlloc(memwatch.EstimateObjectMemory(object)); err != nil { + m.logger.WithError(err).Errorf("memory pressure: cannot process add object") + return nil, fmt.Errorf("cannot process add object: %w", err) + } + + obj, err := m.addObjectToConnectorAndSchema(ctx, principal, object, repl, fetchedClasses) + if err != nil { + return nil, err + } + + return obj, nil +} + +func (m *Manager) addObjectToConnectorAndSchema(ctx context.Context, principal *models.Principal, + object *models.Object, repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (*models.Object, error) { + id, err := m.checkIDOrAssignNew(ctx, principal, object.Class, object.ID, repl, object.Tenant) + if err != nil { + return nil, err + } + object.ID = id + + schemaVersion, err := m.autoSchemaManager.autoSchema(ctx, principal, true, fetchedClasses, object) + if err != nil { + return nil, fmt.Errorf("invalid object: %w", err) + } + + if _, _, err = m.autoSchemaManager.autoTenants(ctx, principal, []*models.Object{object}, fetchedClasses); err != nil { + return nil, err + } + + class := fetchedClasses[object.Class].Class + + err = m.validateObjectAndNormalizeNames(ctx, 
repl, object, nil, fetchedClasses) + if err != nil { + return nil, NewErrInvalidUserInput("invalid object: %v", err) + } + + now := m.timeSource.Now() + object.CreationTimeUnix = now + object.LastUpdateTimeUnix = now + if object.Properties == nil { + object.Properties = map[string]interface{}{} + } + + err = m.modulesProvider.UpdateVector(ctx, object, class, m.findObject, m.logger) + if err != nil { + return nil, err + } + + // Ensure that the local schema has caught up to the version we used to validate + if err := m.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return nil, fmt.Errorf("error waiting for local schema to catch up to version %d: %w", schemaVersion, err) + } + vectors, multiVectors, err := dto.GetVectors(object.Vectors) + if err != nil { + return nil, fmt.Errorf("put object: cannot get vectors: %w", err) + } + err = m.vectorRepo.PutObject(ctx, object, object.Vector, vectors, multiVectors, repl, schemaVersion) + if err != nil { + return nil, fmt.Errorf("put object: %w", err) + } + + return object, nil +} + +func (m *Manager) checkIDOrAssignNew(ctx context.Context, principal *models.Principal, + className string, id strfmt.UUID, repl *additional.ReplicationProperties, tenant string, +) (strfmt.UUID, error) { + if id == "" { + validatedID, err := generateUUID() + if err != nil { + return "", NewErrInternal("could not generate id: %v", err) + } + return validatedID, err + } + + // IDs are always returned lowercase, but they are written + // to disk as uppercase, when provided that way. Here we + // ensure they are lowercase on disk as well, so things + // like filtering are not affected. 
+ // See: https://github.com/weaviate/weaviate/issues/2647 + validatedID := strfmt.UUID(strings.ToLower(id.String())) + + exists, err := m.vectorRepo.Exists(ctx, className, validatedID, repl, tenant) + if exists { + return "", NewErrInvalidUserInput("id '%s' already exists", id) + } else if err != nil { + switch { + case errors.As(err, &ErrInvalidUserInput{}): + return "", err + case errors.As(err, &ErrMultiTenancy{}): + // This may be fine, the class is configured to create non-existing tenants. + // A non-existing tenant will still be detected later on + if enterrors.IsTenantNotFound(err) { + break + } + return "", err + default: + if errors.As(err, &authzerrs.Forbidden{}) { + return "", err + } + return "", NewErrInternal("%v", err) + } + } + + return validatedID, nil +} + +func (m *Manager) validateObjectAndNormalizeNames(ctx context.Context, + repl *additional.ReplicationProperties, + incoming *models.Object, existing *models.Object, fetchedClasses map[string]versioned.Class, +) error { + err := m.validateUUID(incoming) + if err != nil { + return err + } + + if _, ok := fetchedClasses[incoming.Class]; !ok || fetchedClasses[incoming.Class].Class == nil { + return fmt.Errorf("class %q not found in schema", incoming.Class) + } + class := fetchedClasses[incoming.Class].Class + + return validation.New(m.vectorRepo.Exists, m.config, repl). 
+ Object(ctx, class, incoming, existing) +} + +func (m *Manager) validateUUID(obj *models.Object) error { + // Validate schema given in body with the weaviate schema + _, err := uuid.Parse(obj.ID.String()) + return err +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/add_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/add_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d912d9def50114cb096c9c5d24860a4a8021392a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/add_test.go @@ -0,0 +1,556 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "strings" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func Test_Add_Object_WithNoVectorizerModule(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + schemaManager *fakeSchemaManager + authorizer *mocks.FakeAuthorizer + ) + + sch := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Foo", + Vectorizer: config.VectorizerModuleNone, + VectorIndexConfig: hnsw.UserConfig{}, + }, + { + Class: "FooSkipped", + 
Vectorizer: config.VectorizerModuleNone, + VectorIndexConfig: hnsw.UserConfig{ + Skip: true, + }, + }, + }, + }, + } + + resetAutoSchema := func(autoSchemaEnabled bool) { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + schemaManager = &fakeSchemaManager{ + GetSchemaResponse: sch, + } + cfg := &config.WeaviateConfig{ + Config: config.Config{ + AutoSchema: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(autoSchemaEnabled), + DefaultString: schema.DataTypeText.String(), + }, + }, + } + authorizer = mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + + modulesProvider = getFakeModulesProvider() + metrics := &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, authorizer, + vectorRepo, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + reset := func() { + resetAutoSchema(false) + } + + t.Run("without an id set", func(t *testing.T) { + reset() + + ctx := context.Background() + class := &models.Object{ + Vector: []float32{0.1, 0.2, 0.3}, + Class: "Foo", + } + + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, class, nil) + require.Nil(t, err) + uuidDuringCreation := vectorRepo.Mock.Calls[0].Arguments.Get(0).(*models.Object).ID + + assert.Len(t, uuidDuringCreation, 36, "check that a uuid was assigned") + assert.Equal(t, uuidDuringCreation, res.ID, "check that connector add ID and user response match") + }) + + t.Run("with an explicit (correct) ID set", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + object := &models.Object{ + Vector: []float32{0.1, 0.2, 0.3}, + ID: id, + Class: "Foo", + } + vectorRepo.On("Exists", "Foo", id).Return(false, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + uuidDuringCreation := vectorRepo.Mock.Calls[1].Arguments.Get(0).(*models.Object).ID + + assert.Equal(t, id, uuidDuringCreation, "check that a uuid is the user specified one") + assert.Equal(t, res.ID, uuidDuringCreation, "check that connector add ID and user response match") + }) + + t.Run("with an explicit (correct) uppercase id set", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("4A334D0B-6347-40A0-A5AE-339677B20EDE") + lowered := strfmt.UUID(strings.ToLower(id.String())) + object := &models.Object{ + ID: id, + Class: "Foo", + Vector: []float32{0.1, 0.2, 0.3}, + } + vectorRepo.On("Exists", "Foo", lowered).Return(false, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + assert.Equal(t, res.ID, lowered, "check that id was lowered and added") + }) + + t.Run("with an explicit (correct) ID set and a property that doesn't exist", func(t *testing.T) { + resetAutoSchema(true) + + ctx := context.Background() + id := strfmt.UUID("5aaad361-1e0d-42ae-bb52-ee09cb5f31cc") + object := &models.Object{ + Vector: []float32{0.1, 0.2, 0.3}, + ID: id, + Class: "Foo", + Properties: map[string]interface{}{ + "newProperty": "string value", + }, + } + vectorRepo.On("Exists", "Foo", id).Return(false, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + uuidDuringCreation := vectorRepo.Mock.Calls[1].Arguments.Get(0).(*models.Object).ID + + assert.Equal(t, id, uuidDuringCreation, "check that a uuid is the user specified one") + assert.Equal(t, res.ID, uuidDuringCreation, "check that connector add ID and user response match") + }) + + t.Run("with a uuid that's already taken", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + class := &models.Object{ + ID: id, + Class: "Foo", + } + + vectorRepo.On("Exists", "Foo", id).Return(true, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, class, nil) + assert.Equal(t, NewErrInvalidUserInput("id '%s' already exists", id), err) + }) + + t.Run("with a uuid that's malformed", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-4fooooooo2ae-bd52-ee09cb5f31cc") + class := &models.Object{ + ID: id, + Class: "Foo", + } + + vectorRepo.On("Exists", "Foo", id).Return(false, nil).Once() + + _, err := manager.AddObject(ctx, nil, class, nil) + assert.Equal(t, NewErrInvalidUserInput("invalid object: invalid UUID length: %d", len(id)), err) + }) + + t.Run("without a vector", func(t *testing.T) { + // Note that this was an invalid case before v1.10 which added this + // functionality, as part of + // https://github.com/weaviate/weaviate/issues/1800 + reset() + + ctx := context.Background() + class := &models.Object{ + Class: "Foo", + } + + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, class, nil) + assert.Nil(t, err) + }) + + t.Run("resolve alias before rbac check", func(t *testing.T) { + // This test is to make sure alias is resolved to correct + // collection before doing RBAC check on original class during add object. + + reset() + ctx := context.Background() + alias := "FooAlias" + class := "Foo" + + obj := &models.Object{ + Class: alias, // via alias + } + + schemaManager.resolveAliasTo = class + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, obj, nil) + require.NoError(t, err) + assert.Len(t, authorizer.Calls(), 1) + assert.Contains(t, authorizer.Calls()[0].Resources[0], class) // make sure rbac is called with "resolved class" name + }) + + t.Run("without a vector, but indexing skipped", func(t *testing.T) { + reset() + + ctx := context.Background() + class := &models.Object{ + Class: "FooSkipped", + } + + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, class, nil) + assert.Nil(t, err) + }) +} + +func Test_Add_Object_WithExternalVectorizerModule(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Foo", + Vectorizer: config.VectorizerModuleText2VecContextionary, + VectorIndexConfig: hnsw.UserConfig{}, + }, + }, + }, + } + + reset := func() { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + metrics := &fakeMetrics{} + modulesProvider = getFakeModulesProvider() + modulesProvider.On("UsingRef2Vec", mock.Anything).Return(false) + manager = NewManager(schemaManager, cfg, logger, authorizer, + vectorRepo, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + t.Run("without an id set", func(t *testing.T) { + reset() + + ctx := context.Background() + object := &models.Object{ + Class: "Foo", + } + + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + + uuidDuringCreation := vectorRepo.Mock.Calls[0].Arguments.Get(0).(*models.Object).ID + + assert.Len(t, uuidDuringCreation, 36, "check that a uuid was assigned") + assert.Equal(t, uuidDuringCreation, res.ID, "check that connector add ID and user response match") + }) + + t.Run("with an explicit (correct) ID set", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + object := &models.Object{ + ID: id, + Class: "Foo", + } + vectorRepo.On("Exists", "Foo", id).Return(false, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + res, err := manager.AddObject(ctx, nil, object, nil) + uuidDuringCreation := vectorRepo.Mock.Calls[1].Arguments.Get(0).(*models.Object).ID + + assert.Nil(t, err) + assert.Equal(t, id, uuidDuringCreation, "check that a uuid is the user specified one") + assert.Equal(t, res.ID, uuidDuringCreation, "check that connector add ID and user response match") + }) + + t.Run("with a uuid that's already taken", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + object := &models.Object{ + ID: id, + Class: "Foo", + } + + vectorRepo.On("Exists", "Foo", id).Return(true, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, object, nil) + assert.Equal(t, NewErrInvalidUserInput("id '%s' already exists", id), err) + }) + + t.Run("with a uuid that's malformed", func(t *testing.T) { + reset() + + ctx := context.Background() + id := strfmt.UUID("5a1cd361-1e0d-4f00000002ae-bd52-ee09cb5f31cc") + object := &models.Object{ + ID: id, + Class: "Foo", + } + + vectorRepo.On("Exists", "Foo", id).Return(false, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, nil) + + _, err := manager.AddObject(ctx, nil, object, nil) + assert.Equal(t, NewErrInvalidUserInput("invalid object: invalid UUID length: %d", len(id)), err) + }) +} + +func Test_Add_Object_OverrideVectorizer(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "FooOverride", + Vectorizer: config.VectorizerModuleText2VecContextionary, + VectorIndexConfig: hnsw.UserConfig{}, + }, + }, + }, + } + + reset := func() { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + modulesProvider = getFakeModulesProvider() + metrics := &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, + authorizer, vectorRepo, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + t.Run("overriding the vector by explicitly specifying it", func(t *testing.T) { + reset() + + ctx := context.Background() + object := &models.Object{ + Class: "FooOverride", + Vector: []float32{9, 9, 9}, + } + + modulesProvider.On("UpdateVector", mock.Anything, 
mock.AnythingOfType(FindObjectFn)). + Return(object.Vector, nil) + + _, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + + vec := vectorRepo.Mock.Calls[0].Arguments.Get(1).([]float32) + + assert.Equal(t, []float32{9, 9, 9}, vec, "check that vector was overridden") + }) +} + +func Test_AddObjectEmptyProperties(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + ) + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "TestClass", + VectorIndexConfig: hnsw.UserConfig{}, + + Properties: []*models.Property{ + { + Name: "strings", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + } + reset := func() { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + modulesProvider = getFakeModulesProvider() + metrics := &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, + authorizer, vectorRepo, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + reset() + ctx := context.Background() + object := &models.Object{ + Class: "TestClass", + Vector: []float32{9, 9, 9}, + } + assert.Nil(t, object.Properties) + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + addedObject, err := manager.AddObject(ctx, nil, object, nil) + assert.Nil(t, err) + assert.NotNil(t, addedObject.Properties) +} + +func Test_AddObjectWithUUIDProps(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + ) + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "TestClass", + VectorIndexConfig: hnsw.UserConfig{}, + + Properties: []*models.Property{ + { + Name: "my_id", + DataType: []string{"uuid"}, + }, + { + Name: "my_idz", + DataType: []string{"uuid[]"}, + }, + }, + }, + }, + }, + } + reset := func() { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + modulesProvider = getFakeModulesProvider() + metrics := &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, + authorizer, vectorRepo, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry()), + ) + } + reset() + ctx := context.Background() + object := &models.Object{ + Class: "TestClass", + Vector: []float32{9, 9, 9}, + Properties: map[string]interface{}{ + "my_id": "28bafa1e-7956-4c58-8a02-4499a9d15253", + "my_idz": []any{"28bafa1e-7956-4c58-8a02-4499a9d15253"}, + }, + } + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(nil, nil) + addedObject, err := manager.AddObject(ctx, nil, object, nil) + require.Nil(t, err) + require.NotNil(t, addedObject.Properties) + + expectedID := uuid.MustParse("28bafa1e-7956-4c58-8a02-4499a9d15253") + expectedIDz := []uuid.UUID{uuid.MustParse("28bafa1e-7956-4c58-8a02-4499a9d15253")} + + assert.Equal(t, expectedID, addedObject.Properties.(map[string]interface{})["my_id"]) + assert.Equal(t, expectedIDz, addedObject.Properties.(map[string]interface{})["my_idz"]) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/authorization_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/authorization_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7b4593ce1c2a70fddcceae8b707c95d75f067084 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/authorization_test.go @@ -0,0 +1,321 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" +) + +// A component-test like test suite that makes sure that every available UC is +// potentially protected with the Authorization plugin + +func Test_Kinds_Authorization(t *testing.T) { + type testCase struct { + methodName string + additionalArgs []interface{} + expectedVerb string + expectedResources []string + } + + tests := []testCase{ + // single kind + { + methodName: "AddObject", + additionalArgs: []interface{}{(*models.Object)(nil)}, + expectedVerb: authorization.UPDATE, + expectedResources: authorization.ShardsMetadata("", ""), + }, + { + methodName: "ValidateObject", + additionalArgs: []interface{}{(*models.Object)(nil)}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "")}, + }, + { + methodName: "GetObject", + additionalArgs: []interface{}{"", strfmt.UUID("foo"), additional.Properties{}}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "foo")}, + }, + { + methodName: "DeleteObject", + additionalArgs: []interface{}{"class", strfmt.UUID("foo")}, + expectedVerb: authorization.DELETE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { // deprecated by the one above + methodName: "DeleteObject", + additionalArgs: []interface{}{"class", strfmt.UUID("foo")}, + expectedVerb: authorization.DELETE, + 
expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { + methodName: "UpdateObject", + additionalArgs: []interface{}{"class", strfmt.UUID("foo"), (*models.Object)(nil)}, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { // deprecated by the one above + methodName: "UpdateObject", + additionalArgs: []interface{}{"class", strfmt.UUID("foo"), (*models.Object)(nil)}, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { + methodName: "MergeObject", + additionalArgs: []interface{}{ + &models.Object{Class: "class", ID: "foo"}, + (*additional.ReplicationProperties)(nil), + }, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { + methodName: "GetObjectsClass", + additionalArgs: []interface{}{strfmt.UUID("foo")}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "foo")}, + }, + { + methodName: "GetObjectClassFromName", + additionalArgs: []interface{}{strfmt.UUID("foo")}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "foo")}, + }, + { + methodName: "HeadObject", + additionalArgs: []interface{}{"class", strfmt.UUID("foo")}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { // deprecated by the one above + methodName: "HeadObject", + additionalArgs: []interface{}{"", strfmt.UUID("foo")}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "foo")}, + }, + + // query objects + { + methodName: "Query", + additionalArgs: []interface{}{new(QueryParams)}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.ShardsMetadata("", "")[0]}, + }, + + { // list objects is deprecated by query + methodName: "GetObjects", + 
additionalArgs: []interface{}{(*int64)(nil), (*int64)(nil), (*string)(nil), (*string)(nil), additional.Properties{}}, + expectedVerb: authorization.READ, + expectedResources: []string{authorization.Objects("", "", "")}, + }, + + // reference on objects + { + methodName: "AddObjectReference", + additionalArgs: []interface{}{AddReferenceInput{Class: "class", ID: strfmt.UUID("foo"), Property: "some prop"}, (*models.SingleRef)(nil)}, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + { + methodName: "DeleteObjectReference", + additionalArgs: []interface{}{strfmt.UUID("foo"), "some prop", (*models.SingleRef)(nil)}, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("", "", "foo")}, + }, + { + methodName: "UpdateObjectReferences", + additionalArgs: []interface{}{&PutReferenceInput{Class: "class", ID: strfmt.UUID("foo"), Property: "some prop"}}, + expectedVerb: authorization.UPDATE, + expectedResources: []string{authorization.Objects("class", "", "foo")}, + }, + } + + t.Run("verify that a test for every public method exists", func(t *testing.T) { + testedMethods := make([]string, len(tests)) + for i, test := range tests { + testedMethods[i] = test.methodName + } + + for _, method := range allExportedMethods(&Manager{}, "") { + assert.Contains(t, testedMethods, method) + } + }) + + t.Run("verify the tested methods require correct permissions from the authorizer", func(t *testing.T) { + principal := &models.Principal{} + logger, _ := test.NewNullLogger() + for _, test := range tests { + if test.methodName != "MergeObject" { + continue + } + t.Run(test.methodName, func(t *testing.T) { + schemaManager := &fakeSchemaManager{} + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + authorizer.SetErr(errors.New("just a test fake")) + vectorRepo := &fakeVectorRepo{} + manager := NewManager(schemaManager, + cfg, logger, authorizer, + vectorRepo, 
getFakeModulesProvider(), nil, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + + args := append([]interface{}{context.Background(), principal}, test.additionalArgs...) + out, _ := callFuncByName(manager, test.methodName, args...) + + require.Len(t, authorizer.Calls(), 1, "authorizer must be called") + aerr := out[len(out)-1].Interface().(error) + var customErr *Error + if !errors.As(aerr, &customErr) || !customErr.Forbidden() { + assert.Equal(t, errors.New("just a test fake"), aerr, + "execution must abort with authorizer error") + } + + assert.Equal(t, mocks.AuthZReq{Principal: principal, Verb: test.expectedVerb, Resources: test.expectedResources}, + authorizer.Calls()[0], "correct parameters must have been used on authorizer") + }) + } + }) +} + +func Test_BatchKinds_Authorization(t *testing.T) { + type testCase struct { + methodName string + additionalArgs []interface{} + expectedVerb string + expectedResources []string + } + + uri := strfmt.URI("weaviate://localhost/Class/" + uuid.New().String()) + + tests := []testCase{ + { + methodName: "AddObjects", + additionalArgs: []interface{}{ + []*models.Object{{}}, + []*string{}, + &additional.ReplicationProperties{}, + }, + expectedVerb: authorization.UPDATE, + expectedResources: authorization.ShardsData("", ""), + }, + { + methodName: "AddReferences", + additionalArgs: []interface{}{ + []*models.BatchReference{{From: uri + "/ref", To: uri, Tenant: ""}}, + &additional.ReplicationProperties{}, + }, + expectedVerb: authorization.UPDATE, + expectedResources: authorization.ShardsData("Class", ""), + }, + { + methodName: "DeleteObjects", + additionalArgs: []interface{}{ + &models.BatchDeleteMatch{}, + (*int64)(nil), + (*bool)(nil), + (*string)(nil), + &additional.ReplicationProperties{}, + "", + }, + expectedVerb: authorization.DELETE, + expectedResources: authorization.ShardsData("", ""), + }, + } + + t.Run("verify that a test for every public method 
exists", func(t *testing.T) { + testedMethods := make([]string, len(tests)) + for i, test := range tests { + testedMethods[i] = test.methodName + } + + // exception is public method for GRPC which has its own authorization check + for _, method := range allExportedMethods(&BatchManager{}, "DeleteObjectsFromGRPCAfterAuth", "AddObjectsGRPCAfterAuth") { + assert.Contains(t, testedMethods, method) + } + }) + + t.Run("verify the tested methods require correct permissions from the authorizer", func(t *testing.T) { + principal := &models.Principal{} + logger, _ := test.NewNullLogger() + for _, test := range tests { + schemaManager := &fakeSchemaManager{} + cfg := &config.WeaviateConfig{} + authorizer := mocks.NewMockAuthorizer() + authorizer.SetErr(errors.New("just a test fake")) + vectorRepo := &fakeVectorRepo{} + modulesProvider := getFakeModulesProvider() + manager := NewBatchManager(vectorRepo, modulesProvider, schemaManager, cfg, logger, authorizer, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + + args := append([]interface{}{context.Background(), principal}, test.additionalArgs...) + out, _ := callFuncByName(manager, test.methodName, args...) 
+ + require.Len(t, authorizer.Calls(), 1, "authorizer must be called") + assert.Equal(t, errors.New("just a test fake"), out[len(out)-1].Interface(), + "execution must abort with authorizer error") + assert.Equal(t, mocks.AuthZReq{Principal: principal, Verb: test.expectedVerb, Resources: test.expectedResources}, + authorizer.Calls()[0], "correct parameters must have been used on authorizer") + } + }) +} + +// inspired by https://stackoverflow.com/a/33008200 +func callFuncByName(manager interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) { + managerValue := reflect.ValueOf(manager) + m := managerValue.MethodByName(funcName) + if !m.IsValid() { + return make([]reflect.Value, 0), fmt.Errorf("Method not found \"%s\"", funcName) + } + in := make([]reflect.Value, len(params)) + for i, param := range params { + in[i] = reflect.ValueOf(param) + } + out = m.Call(in) + return +} + +func allExportedMethods(subject interface{}, exceptions ...string) []string { + var methods []string + subjectType := reflect.TypeOf(subject) +methodLoop: + for i := 0; i < subjectType.NumMethod(); i++ { + name := subjectType.Method(i).Name + for j := range exceptions { + if name == exceptions[j] { + continue methodLoop + } + } + if name[0] >= 'A' && name[0] <= 'Z' { + methods = append(methods, name) + } + } + + return methods +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/auto_schema.go b/platform/dbops/binaries/weaviate-src/usecases/objects/auto_schema.go new file mode 100644 index 0000000000000000000000000000000000000000..bf5e8a5516d998464df99b9ee887daf30612e670 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/auto_schema.go @@ -0,0 +1,607 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +type AutoSchemaManager struct { + mutex sync.RWMutex + authorizer authorization.Authorizer + schemaManager schemaManager + vectorRepo VectorRepo + config config.AutoSchema + logger logrus.FieldLogger + + // Metrics without labels to avoid cardinality issues + opsDuration *prometheus.HistogramVec + tenantsCount prometheus.Counter +} + +func NewAutoSchemaManager(schemaManager schemaManager, vectorRepo VectorRepo, + config *config.WeaviateConfig, authorizer authorization.Authorizer, logger logrus.FieldLogger, + reg prometheus.Registerer, +) *AutoSchemaManager { + r := promauto.With(reg) + + tenantsCount := r.NewCounter( + prometheus.CounterOpts{ + Name: "weaviate_auto_tenant_total", + Help: "Total number of tenants processed", + }, + ) + + opDuration := r.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "weaviate_auto_tenant_duration_seconds", + Help: "Time spent in auto tenant operations", + }, + []string{"operation"}, + ) + + return &AutoSchemaManager{ + schemaManager: schemaManager, + vectorRepo: vectorRepo, + config: config.Config.AutoSchema, + logger: logger, + authorizer: authorizer, + tenantsCount: tenantsCount, + 
opsDuration: opDuration, + } +} + +func (m *AutoSchemaManager) autoSchema(ctx context.Context, principal *models.Principal, + allowCreateClass bool, classes map[string]versioned.Class, objects ...*models.Object, +) (uint64, error) { + enabled := m.config.Enabled.Get() + + if !enabled { + return 0, nil + } + + m.mutex.Lock() + defer m.mutex.Unlock() + + var maxSchemaVersion uint64 + + for _, object := range objects { + if object == nil { + return 0, ErrInvalidUserInput{validation.ErrorMissingObject} + } + + if len(object.Class) == 0 { + // stop performing auto schema + return 0, ErrInvalidUserInput{validation.ErrorMissingClass} + } + + vclass := classes[object.Class] + + schemaClass := vclass.Class + schemaVersion := vclass.Version + + if schemaClass == nil && !allowCreateClass { + return 0, ErrInvalidUserInput{"given class does not exist"} + } + properties, err := m.getProperties(object) + if err != nil { + return 0, err + } + + if schemaClass == nil { + err := m.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.CollectionsMetadata(object.Class)...) + if err != nil { + return 0, fmt.Errorf("auto schema can't create objects because can't create collection: %w", err) + } + + // it returns the newly created class and version + schemaClass, schemaVersion, err = m.createClass(ctx, principal, object.Class, properties) + if err != nil { + return 0, err + } + + classes[object.Class] = versioned.Class{Class: schemaClass, Version: schemaVersion} + classcache.RemoveClassFromContext(ctx, object.Class) + } else { + if newProperties := schema.DedupProperties(schemaClass.Properties, properties); len(newProperties) > 0 { + err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.CollectionsMetadata(schemaClass.Class)...) 
+ if err != nil { + return 0, fmt.Errorf("auto schema can't create objects because can't update collection: %w", err) + } + schemaClass, schemaVersion, err = m.schemaManager.AddClassProperty(ctx, + principal, schemaClass, schemaClass.Class, true, newProperties...) + if err != nil { + return 0, err + } + classes[object.Class] = versioned.Class{Class: schemaClass, Version: schemaVersion} + classcache.RemoveClassFromContext(ctx, object.Class) + } + } + + if schemaVersion > maxSchemaVersion { + maxSchemaVersion = schemaVersion + } + } + return maxSchemaVersion, nil +} + +func (m *AutoSchemaManager) createClass(ctx context.Context, principal *models.Principal, + className string, properties []*models.Property, +) (*models.Class, uint64, error) { + now := time.Now() + class := &models.Class{ + Class: className, + Properties: properties, + Description: "This property was generated by Weaviate's auto-schema feature on " + now.Format(time.ANSIC), + } + m.logger. + WithField("auto_schema", "createClass"). 
+ Debugf("create class %s", className) + newClass, schemaVersion, err := m.schemaManager.AddClass(ctx, principal, class) + return newClass, schemaVersion, err +} + +func (m *AutoSchemaManager) getProperties(object *models.Object) ([]*models.Property, error) { + properties := []*models.Property{} + if props, ok := object.Properties.(map[string]interface{}); ok { + for name, value := range props { + now := time.Now() + dt, err := m.determineType(value, false) + if err != nil { + return nil, fmt.Errorf("property '%s' on class '%s': %w", name, object.Class, err) + } + + var nestedProperties []*models.NestedProperty + if len(dt) == 1 { + switch dt[0] { + case schema.DataTypeObject: + nestedProperties, err = m.determineNestedProperties(value.(map[string]interface{}), now) + case schema.DataTypeObjectArray: + nestedProperties, err = m.determineNestedPropertiesOfArray(value.([]interface{}), now) + default: + // do nothing + } + } + if err != nil { + return nil, fmt.Errorf("property '%s' on class '%s': %w", name, object.Class, err) + } + + property := &models.Property{ + Name: name, + DataType: m.getDataTypes(dt), + Description: "This property was generated by Weaviate's auto-schema feature on " + now.Format(time.ANSIC), + NestedProperties: nestedProperties, + } + properties = append(properties, property) + } + } + return properties, nil +} + +func (m *AutoSchemaManager) getDataTypes(dataTypes []schema.DataType) []string { + dtypes := make([]string, len(dataTypes)) + for i := range dataTypes { + dtypes[i] = string(dataTypes[i]) + } + return dtypes +} + +func (m *AutoSchemaManager) determineType(value interface{}, ofNestedProp bool) ([]schema.DataType, error) { + fallbackDataType := []schema.DataType{schema.DataTypeText} + fallbackArrayDataType := []schema.DataType{schema.DataTypeTextArray} + + switch typedValue := value.(type) { + case string: + if _, err := time.Parse(time.RFC3339, typedValue); err == nil { + return []schema.DataType{schema.DataType(m.config.DefaultDate)}, 
// determineType infers the schema data type(s) for a single property value.
// Scalar and object values yield exactly one data type; an array of
// cross-references yields one entry per referenced class. ofNestedProp
// disables the phone/geo/ref detection that only applies to top-level
// properties.
func (m *AutoSchemaManager) determineType(value interface{}, ofNestedProp bool) ([]schema.DataType, error) {
	// Fallbacks for values that carry no type information (nil, empty array).
	fallbackDataType := []schema.DataType{schema.DataTypeText}
	fallbackArrayDataType := []schema.DataType{schema.DataTypeTextArray}

	switch typedValue := value.(type) {
	case string:
		// Order matters: RFC3339 dates and UUIDs are more specific than the
		// configured default string type.
		if _, err := time.Parse(time.RFC3339, typedValue); err == nil {
			return []schema.DataType{schema.DataType(m.config.DefaultDate)}, nil
		}
		if _, err := uuid.Parse(typedValue); err == nil {
			return []schema.DataType{schema.DataTypeUUID}, nil
		}
		if m.config.DefaultString != "" {
			return []schema.DataType{schema.DataType(m.config.DefaultString)}, nil
		}
		return []schema.DataType{schema.DataTypeText}, nil
	case json.Number:
		// json.Number maps to the configured default (int or number).
		return []schema.DataType{schema.DataType(m.config.DefaultNumber)}, nil
	case float64:
		return []schema.DataType{schema.DataTypeNumber}, nil
	case int64:
		return []schema.DataType{schema.DataTypeInt}, nil
	case bool:
		return []schema.DataType{schema.DataTypeBoolean}, nil
	case map[string]interface{}:
		// nested properties does not support phone and geo data types
		if !ofNestedProp {
			if dt, ok := m.asGeoCoordinatesType(typedValue); ok {
				return dt, nil
			}
			if dt, ok := m.asPhoneNumber(typedValue); ok {
				return dt, nil
			}
		}
		return []schema.DataType{schema.DataTypeObject}, nil
	case []interface{}:
		if len(typedValue) == 0 {
			return fallbackArrayDataType, nil
		}

		refDataTypes := []schema.DataType{}
		var isRef bool
		var determinedDataType schema.DataType

		// The first element decides whether the array is a reference array or
		// a value array, and (for values) its tentative element type. Later
		// elements must agree, except for the text/uuid/date widening below.
		for i := range typedValue {
			dataType, refDataType, err := m.determineArrayType(typedValue[i], ofNestedProp)
			if err != nil {
				return nil, fmt.Errorf("element [%d]: %w", i, err)
			}
			if i == 0 {
				isRef = refDataType != ""
				determinedDataType = dataType
			}
			if dataType != "" {
				// if an array contains text and UUID/Date, the type should be text
				if determinedDataType == schema.DataTypeTextArray && (dataType == schema.DataTypeUUIDArray || dataType == schema.DataTypeDateArray) {
					continue
				}
				if determinedDataType == schema.DataTypeDateArray && (dataType == schema.DataTypeUUIDArray || dataType == schema.DataTypeTextArray) {
					determinedDataType = schema.DataTypeTextArray
					continue
				}
				if determinedDataType == schema.DataTypeUUIDArray && (dataType == schema.DataTypeDateArray || dataType == schema.DataTypeTextArray) {
					determinedDataType = schema.DataTypeTextArray
					continue
				}

				// Mixing a plain value into a reference array (or vice versa,
				// below) is an error.
				if isRef {
					return nil, fmt.Errorf("element [%d]: mismatched data type - reference expected, got '%s'",
						i, asSingleDataType(dataType))
				}
				if dataType != determinedDataType {
					return nil, fmt.Errorf("element [%d]: mismatched data type - '%s' expected, got '%s'",
						i, asSingleDataType(determinedDataType), asSingleDataType(dataType))
				}
			} else {
				if !isRef {
					return nil, fmt.Errorf("element [%d]: mismatched data type - '%s' expected, got reference",
						i, asSingleDataType(determinedDataType))
				}
				refDataTypes = append(refDataTypes, refDataType)
			}
		}
		if len(refDataTypes) > 0 {
			return refDataTypes, nil
		}
		return []schema.DataType{determinedDataType}, nil
	case nil:
		return fallbackDataType, nil
	default:
		// Unsupported Go type: build the list of accepted types for the error
		// message; phone/geo only apply to top-level properties.
		allowed := []string{
			schema.DataTypeText.String(),
			schema.DataTypeNumber.String(),
			schema.DataTypeInt.String(),
			schema.DataTypeBoolean.String(),
			schema.DataTypeDate.String(),
			schema.DataTypeUUID.String(),
			schema.DataTypeObject.String(),
		}
		if !ofNestedProp {
			allowed = append(allowed, schema.DataTypePhoneNumber.String(), schema.DataTypeGeoCoordinates.String())
		}
		return nil, fmt.Errorf("unrecognized data type of value '%v' - one of '%s' expected",
			typedValue, strings.Join(allowed, "', '"))
	}
}

// asSingleDataType strips the array suffix from an array data type for use in
// element-level error messages; non-array types are returned unchanged.
func asSingleDataType(arrayDataType schema.DataType) schema.DataType {
	if dt, isArray := schema.IsArrayType(arrayDataType); isArray {
		return dt
	}
	return arrayDataType
}
schema.DataTypeTextArray, "", nil + case json.Number: + if schema.DataType(m.config.DefaultNumber) == schema.DataTypeInt { + return schema.DataTypeIntArray, "", nil + } + return schema.DataTypeNumberArray, "", nil + case float64: + return schema.DataTypeNumberArray, "", nil + case int64: + return schema.DataTypeIntArray, "", nil + case bool: + return schema.DataTypeBooleanArray, "", nil + case map[string]interface{}: + if ofNestedProp { + return schema.DataTypeObjectArray, "", nil + } + if refDataType, ok := m.asRef(typedValue); ok { + return "", refDataType, nil + } + return schema.DataTypeObjectArray, "", nil + default: + allowed := []string{ + schema.DataTypeText.String(), + schema.DataTypeNumber.String(), + schema.DataTypeInt.String(), + schema.DataTypeBoolean.String(), + schema.DataTypeDate.String(), + schema.DataTypeUUID.String(), + schema.DataTypeObject.String(), + } + if !ofNestedProp { + allowed = append(allowed, schema.DataTypeCRef.String()) + } + return "", "", fmt.Errorf("unrecognized data type of value '%v' - one of '%s' expected", + typedValue, strings.Join(allowed, "', '")) + } +} + +func (m *AutoSchemaManager) asGeoCoordinatesType(val map[string]interface{}) ([]schema.DataType, bool) { + if len(val) == 2 { + if val["latitude"] != nil && val["longitude"] != nil { + return []schema.DataType{schema.DataTypeGeoCoordinates}, true + } + } + return nil, false +} + +func (m *AutoSchemaManager) asPhoneNumber(val map[string]interface{}) ([]schema.DataType, bool) { + if val["input"] != nil { + if len(val) == 1 { + return []schema.DataType{schema.DataTypePhoneNumber}, true + } + if len(val) == 2 { + if _, ok := val["defaultCountry"]; ok { + return []schema.DataType{schema.DataTypePhoneNumber}, true + } + } + } + + return nil, false +} + +func (m *AutoSchemaManager) asRef(val map[string]interface{}) (schema.DataType, bool) { + if v, ok := val["beacon"]; ok { + if beacon, ok := v.(string); ok { + ref, err := crossref.Parse(beacon) + if err == nil { + if ref.Class 
// asRef reports whether val looks like a cross-reference payload, i.e. a map
// with a string "beacon" entry that parses as a crossref. On success it
// returns the target class name as the reference data type.
func (m *AutoSchemaManager) asRef(val map[string]interface{}) (schema.DataType, bool) {
	if v, ok := val["beacon"]; ok {
		if beacon, ok := v.(string); ok {
			ref, err := crossref.Parse(beacon)
			if err == nil {
				if ref.Class == "" {
					// Beacon without an explicit class: resolve the target
					// object to discover its class.
					// NOTE(review): uses context.Background() instead of a
					// caller-supplied context — confirm this lookup should not
					// be cancellable.
					res, err := m.vectorRepo.ObjectByID(context.Background(), ref.TargetID, search.SelectProperties{}, additional.Properties{}, "")
					if err == nil && res != nil {
						return schema.DataType(res.ClassName), true
					}
				} else {
					return schema.DataType(ref.Class), true
				}
			}
		}
	}
	// Unparsable / unresolvable beacons fall through to "not a ref"; the
	// caller then treats the map as a plain object.
	return "", false
}

// determineNestedProperties infers a nested property per key of the given
// object value. now is passed down so all generated descriptions share one
// timestamp.
func (m *AutoSchemaManager) determineNestedProperties(values map[string]interface{}, now time.Time,
) ([]*models.NestedProperty, error) {
	i := 0
	nestedProperties := make([]*models.NestedProperty, len(values))
	for name, value := range values {
		np, err := m.determineNestedProperty(name, value, now)
		if err != nil {
			return nil, fmt.Errorf("nested property '%s': %w", name, err)
		}
		nestedProperties[i] = np
		i++
	}
	return nestedProperties, nil
}

// determineNestedProperty infers the data type of one nested value and, for
// object(-array) values, recurses into its sub-structure.
func (m *AutoSchemaManager) determineNestedProperty(name string, value interface{}, now time.Time,
) (*models.NestedProperty, error) {
	dt, err := m.determineType(value, true)
	if err != nil {
		return nil, err
	}

	var np []*models.NestedProperty
	if len(dt) == 1 {
		switch dt[0] {
		case schema.DataTypeObject:
			np, err = m.determineNestedProperties(value.(map[string]interface{}), now)
		case schema.DataTypeObjectArray:
			np, err = m.determineNestedPropertiesOfArray(value.([]interface{}), now)
		default:
			// do nothing
		}
	}
	if err != nil {
		return nil, err
	}

	return &models.NestedProperty{
		Name:     name,
		DataType: m.getDataTypes(dt),
		Description: "This nested property was generated by Weaviate's auto-schema feature on " +
			now.Format(time.ANSIC),
		NestedProperties: np,
	}, nil
}
// determineNestedPropertiesOfArray infers the nested property structure of an
// object array by seeding from the first element and folding every further
// element in: unseen keys are appended, and keys that are themselves nested
// objects have their sub-structures merged recursively.
func (m *AutoSchemaManager) determineNestedPropertiesOfArray(valArray []interface{}, now time.Time,
) ([]*models.NestedProperty, error) {
	if len(valArray) == 0 {
		return []*models.NestedProperty{}, nil
	}
	// Seed the result with the structure of the first element.
	nestedProperties, err := m.determineNestedProperties(valArray[0].(map[string]interface{}), now)
	if err != nil {
		return nil, err
	}
	if len(valArray) == 1 {
		return nestedProperties, nil
	}

	// Index by property name for O(1) lookups while folding in the rest.
	nestedPropertiesIndexMap := make(map[string]int, len(nestedProperties))
	for index := range nestedProperties {
		nestedPropertiesIndexMap[nestedProperties[index].Name] = index
	}

	for i := 1; i < len(valArray); i++ {
		values := valArray[i].(map[string]interface{})
		for name, value := range values {
			index, ok := nestedPropertiesIndexMap[name]
			if !ok {
				// Key not seen in earlier elements: infer and append it.
				np, err := m.determineNestedProperty(name, value, now)
				if err != nil {
					return nil, err
				}
				nestedPropertiesIndexMap[name] = len(nestedProperties)
				nestedProperties = append(nestedProperties, np)
			} else if _, isNested := schema.AsNested(nestedProperties[index].DataType); isNested {
				// Existing nested (object) property: merge the sub-structure
				// contributed by this element into the accumulated one.
				np, err := m.determineNestedProperty(name, value, now)
				if err != nil {
					return nil, err
				}
				if mergedNestedProperties, merged := schema.MergeRecursivelyNestedProperties(
					nestedProperties[index].NestedProperties, np.NestedProperties,
				); merged {
					nestedProperties[index].NestedProperties = mergedNestedProperties
				}
			}
		}
	}

	return nestedProperties, nil
}
// autoTenants creates any missing tenants referenced by the given objects for
// classes that have multi-tenancy with auto tenant creation enabled. It
// returns the highest schema version observed among the affected classes and
// the number of distinct (class, tenant) pairs seen on valid classes (at
// least 1, see the fallback at the end), alongside any error.
func (m *AutoSchemaManager) autoTenants(ctx context.Context,
	principal *models.Principal, objects []*models.Object, fetchedClasses map[string]versioned.Class,
) (uint64, int, error) {
	// Record the total duration of this call regardless of outcome.
	start := time.Now()
	defer func() {
		m.opsDuration.With(prometheus.Labels{
			"operation": "total",
		}).Observe(time.Since(start).Seconds())
	}()

	classTenants := make(map[string]map[string]struct{})

	// group by tenants by class
	for _, obj := range objects {
		if _, ok := classTenants[obj.Class]; !ok {
			classTenants[obj.Class] = map[string]struct{}{}
		}
		classTenants[obj.Class][obj.Tenant] = struct{}{}
	}

	totalTenants := 0
	// skip invalid classes, non-MT classes, no auto tenant creation classes
	var maxSchemaVersion uint64
	for className, tenantNames := range classTenants {
		vclass, exists := fetchedClasses[className]
		if !exists || // invalid class
			vclass.Class == nil { // class is nil
			continue
		}
		// Tenants of any valid class count toward the total, even when the
		// class is skipped below for not being multi-tenant.
		totalTenants += len(tenantNames)

		if !schema.MultiTenancyEnabled(vclass.Class) || // non-MT class
			!vclass.Class.MultiTenancyConfig.AutoTenantCreation { // no auto tenant creation
			continue
		}
		// Materialize the tenant set as parallel name/model slices for the
		// authorization check and the add call.
		names := make([]string, len(tenantNames))
		tenants := make([]*models.Tenant, len(tenantNames))
		i := 0
		for name := range tenantNames {
			names[i] = name
			tenants[i] = &models.Tenant{Name: name}
			i++
		}
		err := m.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.ShardsMetadata(className, names...)...)
		if err != nil {
			return 0, totalTenants, fmt.Errorf("add tenants because can't create collection: %w", err)
		}

		// Time the actual tenant creation separately from the total.
		addStart := time.Now()
		if err := m.addTenants(ctx, principal, className, tenants); err != nil {
			return 0, totalTenants, fmt.Errorf("add tenants to class %q: %w", className, err)
		}
		m.tenantsCount.Add(float64(len(tenants)))
		m.opsDuration.With(prometheus.Labels{
			"operation": "add",
		}).Observe(time.Since(addStart).Seconds())

		if vclass.Version > maxSchemaVersion {
			maxSchemaVersion = vclass.Version
		}
	}

	if totalTenants == 0 {
		// if we exclusively hit non-MT classes, count them as a single tenant
		totalTenants = 1
	}

	return maxSchemaVersion, totalTenants, nil
}

// addTenants adds the given tenants to class via the schema manager and blocks
// until the local node has caught up to the resulting schema version.
func (m *AutoSchemaManager) addTenants(ctx context.Context, principal *models.Principal,
	class string, tenants []*models.Tenant,
) error {
	if len(tenants) == 0 {
		return fmt.Errorf(
			"tenants must be included for multitenant-enabled class %q", class)
	}
	version, err := m.schemaManager.AddTenants(ctx, principal, class, tenants)
	if err != nil {
		return err
	}

	err = m.schemaManager.WaitForUpdate(ctx, version)
	if err != nil {
		return fmt.Errorf("could not wait for update: %w", err)
	}

	return nil
}
0000000000000000000000000000000000000000..7163159ad891516e0aa480afd239fb4622322035 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/auto_schema_test.go @@ -0,0 +1,1712 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/test_utils" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +func Test_autoSchemaManager_determineType(t *testing.T) { + type fields struct { + config config.AutoSchema + } + type args struct { + value interface{} + } + + autoSchemaEnabledFields := fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + } + + tests := []struct { + name string + fields fields + args args + want []schema.DataType + errMsgs []string + }{ + { + name: "fail determining type of nested array (1)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + }, + }, + args: args{ + value: []interface{}{[]interface{}{"panic"}}, + }, + errMsgs: []string{"unrecognized data type"}, + }, + { + name: "fail determining type of nested array (2)", + fields: fields{ + config: 
config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + }, + }, + args: args{ + value: []interface{}{[]string{}}, + }, + errMsgs: []string{"unrecognized data type"}, + }, + { + name: "fail determining type of mixed elements in array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + }, + }, + args: args{ + value: []interface{}{"something", false}, + }, + errMsgs: []string{"mismatched data type", "'text' expected, got 'boolean'"}, + }, + { + name: "fail determining type of mixed refs and objects (1)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + map[string]interface{}{"beacon": "weaviate://localhost/df48b9f6-ba48-470c-bf6a-57657cb07390"}, + map[string]interface{}{"propOfObject": "something"}, + }, + }, + errMsgs: []string{"mismatched data type", "reference expected, got 'object'"}, + }, + { + name: "fail determining type of mixed refs and objects (2)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + map[string]interface{}{"propOfObject": "something"}, + map[string]interface{}{"beacon": "weaviate://localhost/df48b9f6-ba48-470c-bf6a-57657cb07390"}, + }, + }, + errMsgs: []string{"mismatched data type", "'object' expected, got reference"}, + }, + { + name: "determine text", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + }, + }, + args: args{ + value: "string", + }, + want: []schema.DataType{schema.DataTypeText}, + }, + { + name: "determine text (implicit)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: "string", + }, + want: []schema.DataType{schema.DataTypeText}, + }, + { + name: 
"determine date", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultDate: "date", + }, + }, + args: args{ + value: "2002-10-02T15:00:00Z", + }, + want: []schema.DataType{schema.DataTypeDate}, + }, + { + name: "determine uuid (1)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: "5b2cbe85-c38a-41f7-9e8c-7406ff6d15aa", + }, + want: []schema.DataType{schema.DataTypeUUID}, + }, + { + name: "determine uuid (2)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: "5b2cbe85c38a41f79e8c7406ff6d15aa", + }, + want: []schema.DataType{schema.DataTypeUUID}, + }, + { + name: "determine int", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: "int", + }, + }, + args: args{ + value: json.Number("1"), + }, + want: []schema.DataType{schema.DataTypeInt}, + }, + { + name: "determine number", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: "number", + }, + }, + args: args{ + value: json.Number("1"), + }, + want: []schema.DataType{schema.DataTypeNumber}, + }, + { + name: "determine boolean", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: "number", + }, + }, + args: args{ + value: true, + }, + want: []schema.DataType{schema.DataTypeBoolean}, + }, + { + name: "determine geoCoordinates", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("1.1"), + }, + }, + want: []schema.DataType{schema.DataTypeGeoCoordinates}, + }, + { + name: "determine phoneNumber", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: 
map[string]interface{}{ + "input": "020 1234567", + }, + }, + want: []schema.DataType{schema.DataTypePhoneNumber}, + }, + { + name: "determine phoneNumber (2)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + want: []schema.DataType{schema.DataTypePhoneNumber}, + }, + { + name: "determine cross reference", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + map[string]interface{}{"beacon": "weaviate://localhost/df48b9f6-ba48-470c-bf6a-57657cb07390"}, + }, + }, + want: []schema.DataType{schema.DataType("Publication")}, + }, + { + name: "determine cross references", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + map[string]interface{}{"beacon": "weaviate://localhost/df48b9f6-ba48-470c-bf6a-57657cb07390"}, + map[string]interface{}{"beacon": "weaviate://localhost/df48b9f6-ba48-470c-bf6a-57657cb07391"}, + }, + }, + want: []schema.DataType{schema.DataType("Publication"), schema.DataType("Article")}, + }, + { + name: "determine text array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + }, + }, + args: args{ + value: []interface{}{"a", "b"}, + }, + want: []schema.DataType{schema.DataTypeTextArray}, + }, + { + name: "determine text array (implicit)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{"a", "b"}, + }, + want: []schema.DataType{schema.DataTypeTextArray}, + }, + { + name: "determine int array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: "int", + }, + }, + args: args{ + value: 
[]interface{}{json.Number("11"), json.Number("12")}, + }, + want: []schema.DataType{schema.DataTypeIntArray}, + }, + { + name: "determine number array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: "number", + }, + }, + args: args{ + value: []interface{}{json.Number("1.1"), json.Number("1.2")}, + }, + want: []schema.DataType{schema.DataTypeNumberArray}, + }, + { + name: "determine boolean array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{true, false}, + }, + want: []schema.DataType{schema.DataTypeBooleanArray}, + }, + { + name: "determine date array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultDate: "date", + }, + }, + args: args{ + value: []interface{}{"2002-10-02T15:00:00Z", "2002-10-02T15:01:00Z"}, + }, + want: []schema.DataType{schema.DataTypeDateArray}, + }, + { + name: "determine uuid array (1)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + "5b2cbe85-c38a-41f7-9e8c-7406ff6d15aa", + "57a8564d-089b-4cd9-be39-56681605e0da", + }, + }, + want: []schema.DataType{schema.DataTypeUUIDArray}, + }, + { + name: "determine uuid array (2)", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + "5b2cbe85c38a41f79e8c7406ff6d15aa", + "57a8564d089b4cd9be3956681605e0da", + }, + }, + want: []schema.DataType{schema.DataTypeUUIDArray}, + }, + { + name: "determine mixed string arrays, string first", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + "string", + "57a8564d089b4cd9be3956681605e0da", + }, + }, + want: []schema.DataType{schema.DataTypeTextArray}, + }, + { + name: "determine mixed string/UUID arrays, 
string later", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + "57a8564d089b4cd9be3956681605e0da", + "string", + }, + }, + want: []schema.DataType{schema.DataTypeTextArray}, + }, + { + name: "determine mixed string/date arrays, string later", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + }, + }, + args: args{ + value: []interface{}{ + "2002-10-02T15:00:00Z", + "string", + }, + }, + want: []schema.DataType{schema.DataTypeTextArray}, + }, + { + name: "[deprecated string] determine string", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeString.String(), + }, + }, + args: args{ + value: "string", + }, + want: []schema.DataType{schema.DataTypeString}, + }, + { + name: "[deprecated string] determine string array", + fields: fields{ + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeString.String(), + }, + }, + args: args{ + value: []interface{}{"a", "b"}, + }, + want: []schema.DataType{schema.DataTypeStringArray}, + }, + { + name: "determine object", + fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "some_number": 1.23, + "some_bool": false, + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + { + name: "determine object array", + fields: autoSchemaEnabledFields, + args: args{ + value: []interface{}{ + map[string]interface{}{ + "some_number": 1.23, + "some_bool": false, + }, + }, + }, + want: []schema.DataType{schema.DataTypeObjectArray}, + }, + { + name: "determine object, not geoCoordinates (too few props 1)", + fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "latitude": json.Number("1.1"), + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + { + name: "determine object, not geoCoordinates (too few props 2)", + 
fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "longitude": json.Number("1.1"), + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + { + name: "determine object, not geoCoordinates (too many props)", + fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("1.1"), + "unrelevant": "some text", + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + { + name: "determine object, not phoneNumber (too few props)", + fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "defaultCountry": "nl", + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + { + name: "determine object, not phoneNumber (too many props)", + fields: autoSchemaEnabledFields, + args: args{ + value: map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + "internationalFormatted": "+31 20 1234567", + "countryCode": 31, + "national": 201234567, + "nationalFormatted": "020 1234567", + "valid": true, + }, + }, + want: []schema.DataType{schema.DataTypeObject}, + }, + } + for _, tt := range tests { + vectorRepo := &fakeVectorRepo{} + vectorRepo.On("ObjectByID", strfmt.UUID("df48b9f6-ba48-470c-bf6a-57657cb07390"), mock.Anything, mock.Anything, mock.Anything). + Return(&search.Result{ClassName: "Publication"}, nil).Once() + vectorRepo.On("ObjectByID", strfmt.UUID("df48b9f6-ba48-470c-bf6a-57657cb07391"), mock.Anything, mock.Anything, mock.Anything). 
+ Return(&search.Result{ClassName: "Article"}, nil).Once() + m := &AutoSchemaManager{ + schemaManager: &fakeSchemaManager{}, + vectorRepo: vectorRepo, + config: tt.fields.config, + } + t.Run(tt.name, func(t *testing.T) { + got, err := m.determineType(tt.args.value, false) + if len(tt.errMsgs) == 0 { + require.NoError(t, err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("autoSchemaManager.determineType() = %v, want %v", got, tt.want) + } + } else { + for _, errMsg := range tt.errMsgs { + require.ErrorContains(t, err, errMsg) + } + assert.Nil(t, got) + } + }) + } +} + +func Test_autoSchemaManager_autoSchema_emptyRequest(t *testing.T) { + // given + vectorRepo := &fakeVectorRepo{} + vectorRepo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&search.Result{ClassName: "Publication"}, nil).Once() + schemaManager := &fakeSchemaManager{} + logger, _ := test.NewNullLogger() + autoSchemaManager := &AutoSchemaManager{ + schemaManager: schemaManager, + vectorRepo: vectorRepo, + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + DefaultNumber: "number", + DefaultDate: "date", + }, + authorizer: fakeAuthorizer{}, + logger: logger, + } + + var obj *models.Object + + knownClasses := map[string]versioned.Class{} + + _, err := autoSchemaManager.autoSchema(context.Background(), &models.Principal{}, true, knownClasses, obj) + assert.EqualError(t, fmt.Errorf(validation.ErrorMissingObject), err.Error()) +} + +func Test_autoSchemaManager_autoSchema_create(t *testing.T) { + // given + vectorRepo := &fakeVectorRepo{} + vectorRepo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(&search.Result{ClassName: "Publication"}, nil).Once() + schemaManager := &fakeSchemaManager{} + logger, _ := test.NewNullLogger() + autoSchemaManager := &AutoSchemaManager{ + schemaManager: schemaManager, + vectorRepo: vectorRepo, + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + DefaultNumber: "number", + DefaultDate: "date", + }, + authorizer: fakeAuthorizer{}, + logger: logger, + } + obj := &models.Object{ + Class: "Publication", + Properties: map[string]interface{}{ + "name": "Jodie Sparrow", + "age": json.Number("30"), + "publicationDate": "2002-10-02T15:00:00Z", + "textArray": []interface{}{"a", "b"}, + "numberArray": []interface{}{json.Number("30")}, + }, + } + knownClasses := map[string]versioned.Class{} + + // when + schemaBefore := schemaManager.GetSchemaResponse + _, err := autoSchemaManager.autoSchema(context.Background(), &models.Principal{}, true, knownClasses, obj) + schemaAfter := schemaManager.GetSchemaResponse + + // then + require.Nil(t, schemaBefore.Objects) + require.Nil(t, err) + require.NotNil(t, schemaAfter.Objects) + assert.Equal(t, 1, len(schemaAfter.Objects.Classes)) + + class := (schemaAfter.Objects.Classes)[0] + assert.Equal(t, "Publication", class.Class) + assert.Equal(t, 5, len(class.Properties)) + require.NotNil(t, getProperty(class.Properties, "name")) + assert.Equal(t, "name", getProperty(class.Properties, "name").Name) + assert.Equal(t, "text", getProperty(class.Properties, "name").DataType[0]) + require.NotNil(t, getProperty(class.Properties, "age")) + assert.Equal(t, "age", getProperty(class.Properties, "age").Name) + assert.Equal(t, "number", getProperty(class.Properties, "age").DataType[0]) + require.NotNil(t, getProperty(class.Properties, "publicationDate")) + assert.Equal(t, "publicationDate", getProperty(class.Properties, "publicationDate").Name) + assert.Equal(t, "date", getProperty(class.Properties, "publicationDate").DataType[0]) + 
require.NotNil(t, getProperty(class.Properties, "textArray")) + assert.Equal(t, "textArray", getProperty(class.Properties, "textArray").Name) + assert.Equal(t, "text[]", getProperty(class.Properties, "textArray").DataType[0]) + require.NotNil(t, getProperty(class.Properties, "numberArray")) + assert.Equal(t, "numberArray", getProperty(class.Properties, "numberArray").Name) + assert.Equal(t, "number[]", getProperty(class.Properties, "numberArray").DataType[0]) + assert.Equal(t, "hnsw", class.VectorIndexType) + assert.Equal(t, "none", class.Vectorizer) + assert.NotNil(t, class.VectorIndexConfig) +} + +func Test_autoSchemaManager_autoSchema_update(t *testing.T) { + // given + vectorRepo := &fakeVectorRepo{} + vectorRepo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&search.Result{ClassName: "Publication"}, nil).Once() + logger, _ := test.NewNullLogger() + + class := &models.Class{ + Class: "Publication", + Properties: []*models.Property{ + { + Name: "age", + DataType: []string{"int"}, + }, + }, + } + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + }, + } + autoSchemaManager := &AutoSchemaManager{ + schemaManager: schemaManager, + vectorRepo: vectorRepo, + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultString: schema.DataTypeText.String(), + DefaultNumber: "int", + DefaultDate: "date", + }, + authorizer: fakeAuthorizer{}, + logger: logger, + } + obj := &models.Object{ + Class: "Publication", + Properties: map[string]interface{}{ + "name": "Jodie Sparrow", + "age": json.Number("30"), + "publicationDate": "2002-10-02T15:00:00Z", + "textArray": []interface{}{"a", "b"}, + "numberArray": []interface{}{json.Number("30")}, + }, + } + // when + // then + schemaBefore := schemaManager.GetSchemaResponse + require.NotNil(t, schemaBefore.Objects) + assert.Equal(t, 1, len(schemaBefore.Objects.Classes)) + 
assert.Equal(t, "Publication", (schemaBefore.Objects.Classes)[0].Class) + assert.Equal(t, 1, len((schemaBefore.Objects.Classes)[0].Properties)) + assert.Equal(t, "age", (schemaBefore.Objects.Classes)[0].Properties[0].Name) + assert.Equal(t, "int", (schemaBefore.Objects.Classes)[0].Properties[0].DataType[0]) + + knownClasses := map[string]versioned.Class{ + class.Class: {Version: 0, Class: class}, + } + + _, err := autoSchemaManager.autoSchema(context.Background(), &models.Principal{}, true, knownClasses, obj) + require.Nil(t, err) + + schemaAfter := schemaManager.GetSchemaResponse + require.NotNil(t, schemaAfter.Objects) + assert.Equal(t, 1, len(schemaAfter.Objects.Classes)) + assert.Equal(t, "Publication", (schemaAfter.Objects.Classes)[0].Class) + assert.Equal(t, 5, len((schemaAfter.Objects.Classes)[0].Properties)) + require.NotNil(t, getProperty((schemaAfter.Objects.Classes)[0].Properties, "age")) + assert.Equal(t, "age", getProperty((schemaAfter.Objects.Classes)[0].Properties, "age").Name) + assert.Equal(t, "int", getProperty((schemaAfter.Objects.Classes)[0].Properties, "age").DataType[0]) + require.NotNil(t, getProperty((schemaAfter.Objects.Classes)[0].Properties, "name")) + assert.Equal(t, "name", getProperty((schemaAfter.Objects.Classes)[0].Properties, "name").Name) + assert.Equal(t, "text", getProperty((schemaAfter.Objects.Classes)[0].Properties, "name").DataType[0]) + require.NotNil(t, getProperty((schemaAfter.Objects.Classes)[0].Properties, "publicationDate")) + assert.Equal(t, "publicationDate", getProperty((schemaAfter.Objects.Classes)[0].Properties, "publicationDate").Name) + assert.Equal(t, "date", getProperty((schemaAfter.Objects.Classes)[0].Properties, "publicationDate").DataType[0]) + require.NotNil(t, getProperty((schemaAfter.Objects.Classes)[0].Properties, "textArray")) + assert.Equal(t, "textArray", getProperty((schemaAfter.Objects.Classes)[0].Properties, "textArray").Name) + assert.Equal(t, "text[]", 
getProperty((schemaAfter.Objects.Classes)[0].Properties, "textArray").DataType[0]) + require.NotNil(t, getProperty((schemaAfter.Objects.Classes)[0].Properties, "numberArray")) + assert.Equal(t, "numberArray", getProperty((schemaAfter.Objects.Classes)[0].Properties, "numberArray").Name) + assert.Equal(t, "int[]", getProperty((schemaAfter.Objects.Classes)[0].Properties, "numberArray").DataType[0]) +} + +func Test_autoSchemaManager_getProperties(t *testing.T) { + type testCase struct { + name string + valProperties map[string]interface{} + expectedProperties []*models.Property + } + + testCases := []testCase{ + { + name: "mixed 1", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_int": json.Number("123"), + "nested_text": "some text", + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_bool_lvl2": false, + "nested_numbers_lvl2": []interface{}{ + json.Number("11.11"), + }, + }, + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + name: "mixed 2", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_number": json.Number("123"), + "nested_text": "some text", + "nested_objects": []interface{}{ + map[string]interface{}{ + 
"nested_date_lvl2": "2022-01-01T00:00:00+02:00", + "nested_numbers_lvl2": []interface{}{ + json.Number("11.11"), + }, + }, + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + name: "ref", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_ref_wannabe": []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/Soup/8c156d37-81aa-4ce9-a811-621e2702b825", + }, + }, + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_ref_wannabe_lvl2": []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/Soup/8c156d37-81aa-4ce9-a811-621e2702b825", + }, + }, + }, + }, + }, + "ref": []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/Soup/8c156d37-81aa-4ce9-a811-621e2702b825", + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_ref_wannabe", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "beacon", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: 
"nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_ref_wannabe_lvl2", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "beacon", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + Name: "ref", + DataType: []string{"Soup"}, + }, + }, + }, + { + name: "phone", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_phone_wannabe": map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + "nested_phone_wannabes": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_phone_wannabe_lvl2": map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + "nested_phone_wannabes_lvl2": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + }, + }, + }, + "phone": map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + "phone_wannabes": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_phone_wannabe", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_phone_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { 
+ Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_phone_wannabe_lvl2", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_phone_wannabes_lvl2", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + Name: "phone", + DataType: schema.DataTypePhoneNumber.PropString(), + }, + { + Name: "phone_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + }, + }, + { + name: "geo", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_geo_wannabe": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "nested_geo_wannabes": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + }, + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_geo_wannabe_lvl2": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "nested_geo_wannabes_lvl2": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": 
json.Number("2.2"), + }, + }, + }, + }, + }, + "geo": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "geo_wannabes": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_geo_wannabe", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_geo_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_geo_wannabe_lvl2", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_geo_wannabes_lvl2", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + Name: "geo", + DataType: schema.DataTypeGeoCoordinates.PropString(), + }, + { + Name: "geo_wannabes", + DataType: 
schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + }, + }, + { + name: "case more than 1 item in slice", + valProperties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_int": json.Number("123"), + "nested_text": "some text", + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_bool_lvl2": false, + "nested_numbers_lvl2": []interface{}{ + json.Number("11.11"), + float64(100), + }, + }, + map[string]interface{}{ + "nested_bool_lvl3": false, + "nested_numbers_lvl3": []interface{}{ + float64(100), + }, + }, + }, + }, + }, + expectedProperties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + }, + { + Name: "nested_bool_lvl3", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "nested_numbers_lvl3", + DataType: schema.DataTypeNumberArray.PropString(), + }, + }, + }, + }, + }, + }, + }, + } + + manager := &AutoSchemaManager{ + schemaManager: &fakeSchemaManager{}, + vectorRepo: &fakeVectorRepo{}, + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), + DefaultNumber: schema.DataTypeNumber.String(), + DefaultString: schema.DataTypeText.String(), + 
DefaultDate: schema.DataTypeDate.String(), + }, + authorizer: fakeAuthorizer{}, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("testCase_%d", i), func(t *testing.T) { + properties, _ := manager.getProperties(&models.Object{ + Class: "ClassWithObjectProps", + Properties: tc.valProperties, + }) + + assertPropsMatch(t, tc.expectedProperties, properties) + }) + } +} + +func Test_autoSchemaManager_perform_withNested(t *testing.T) { + logger, _ := test.NewNullLogger() + className := "ClassWithObjectProps" + + class := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + }, + }, + }, + }, + }, + }, + } + object := &models.Object{ + Class: className, + Properties: map[string]interface{}{ + "name": "someName", + "objectProperty": map[string]interface{}{ + "nested_number": json.Number("123"), + "nested_text": "some text", + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_date_lvl2": "2022-01-01T00:00:00+02:00", + "nested_numbers_lvl2": []interface{}{ + json.Number("11.11"), + }, + "nested_phone_wannabe_lvl2": map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + "nested_phone_wannabes_lvl2": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + }, + }, + "nested_phone_wannabe": map[string]interface{}{ + 
"input": "020 1234567", + "defaultCountry": "nl", + }, + "nested_phone_wannabes": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + }, + "phone": map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + "phone_wannabes": []interface{}{ + map[string]interface{}{ + "input": "020 1234567", + "defaultCountry": "nl", + }, + }, + "objectPropertyGeo": map[string]interface{}{ + "nested_objects": []interface{}{ + map[string]interface{}{ + "nested_geo_wannabe_lvl2": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "nested_geo_wannabes_lvl2": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + }, + }, + }, + "nested_geo_wannabe": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "nested_geo_wannabes": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + }, + }, + "geo": map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + "geo_wannabes": []interface{}{ + map[string]interface{}{ + "latitude": json.Number("1.1"), + "longitude": json.Number("2.2"), + }, + }, + }, + } + expectedClass := &models.Class{ + Class: className, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "objectProperty", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_int", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_number", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "nested_text", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "nested_phone_wannabe", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", 
+ DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_phone_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_bool_lvl2", + DataType: schema.DataTypeBoolean.PropString(), + }, + { + Name: "nested_date_lvl2", + DataType: schema.DataTypeDate.PropString(), + }, + { + Name: "nested_numbers_lvl2", + DataType: schema.DataTypeNumberArray.PropString(), + }, + { + Name: "nested_phone_wannabe_lvl2", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "nested_phone_wannabes_lvl2", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + Name: "phone", + DataType: schema.DataTypePhoneNumber.PropString(), + }, + { + Name: "phone_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "input", + DataType: schema.DataTypeText.PropString(), + }, + { + Name: "defaultCountry", + DataType: schema.DataTypeText.PropString(), + }, + }, + }, + { + Name: "objectPropertyGeo", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_geo_wannabe", + DataType: 
schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_geo_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_objects", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "nested_geo_wannabe_lvl2", + DataType: schema.DataTypeObject.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + { + Name: "nested_geo_wannabes_lvl2", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + }, + }, + }, + }, + { + Name: "geo", + DataType: schema.DataTypeGeoCoordinates.PropString(), + }, + { + Name: "geo_wannabes", + DataType: schema.DataTypeObjectArray.PropString(), + NestedProperties: []*models.NestedProperty{ + { + Name: "latitude", + DataType: schema.DataTypeNumber.PropString(), + }, + { + Name: "longitude", + DataType: schema.DataTypeNumber.PropString(), + }, + }, + }, + }, + } + + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{class}, + }, + }, + } + manager := &AutoSchemaManager{ + schemaManager: schemaManager, + vectorRepo: &fakeVectorRepo{}, + config: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(true), 
+ DefaultNumber: schema.DataTypeNumber.String(), + DefaultString: schema.DataTypeText.String(), + DefaultDate: schema.DataTypeDate.String(), + }, + logger: logger, + authorizer: fakeAuthorizer{}, + } + + knownClasses := map[string]versioned.Class{ + class.Class: {Version: 0, Class: class}, + } + + _, err := manager.autoSchema(context.Background(), &models.Principal{}, true, knownClasses, object) + require.NoError(t, err) + + schemaAfter := schemaManager.GetSchemaResponse + require.NotNil(t, schemaAfter.Objects) + require.Len(t, schemaAfter.Objects.Classes, 1) + require.Equal(t, className, schemaAfter.Objects.Classes[0].Class) + + assertPropsMatch(t, expectedClass.Properties, schemaAfter.Objects.Classes[0].Properties) +} + +func getProperty(properties []*models.Property, name string) *models.Property { + for _, prop := range properties { + if prop.Name == name { + return prop + } + } + return nil +} + +func assertPropsMatch(t *testing.T, propsA, propsB []*models.Property) { + require.Len(t, propsB, len(propsA), "props: different length") + + pMap := map[string]int{} + for index, p := range propsA { + pMap[p.Name] = index + } + + for _, pB := range propsB { + require.Contains(t, pMap, pB.Name) + pA := propsA[pMap[pB.Name]] + + assert.Equal(t, pA.DataType, pB.DataType) + test_utils.AssertNestedPropsMatch(t, pA.NestedProperties, pB.NestedProperties) + } +} + +type fakeAuthorizer struct{} + +func (f fakeAuthorizer) Authorize(ctx context.Context, _ *models.Principal, _ string, _ ...string) error { + return nil +} + +func (f fakeAuthorizer) AuthorizeSilent(ctx context.Context, _ *models.Principal, _ string, _ ...string) error { + return nil +} + +func (f fakeAuthorizer) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, verb string, resources ...string) ([]string, error) { + return resources, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add.go 
new file mode 100644 index 0000000000000000000000000000000000000000..e7609a3821a03b8e7b360a864a0cd9f6e9933c77 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add.go @@ -0,0 +1,207 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/google/uuid" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +var errEmptyObjects = NewErrInvalidUserInput("invalid param 'objects': cannot be empty, need at least one object for batching") + +// AddObjects Class Instances in batch to the connected DB +func (b *BatchManager) AddObjects(ctx context.Context, principal *models.Principal, + objects []*models.Object, fields []*string, repl *additional.ReplicationProperties, +) (BatchObjects, error) { + ctx = classcache.ContextWithClassCache(ctx) + + classesShards := make(map[string][]string) + for _, obj := range objects { + obj.Class = schema.UppercaseClassName(obj.Class) + cls, _ := b.resolveAlias(obj.Class) + obj.Class = cls + classesShards[obj.Class] = append(classesShards[obj.Class], obj.Tenant) + } + knownClasses := map[string]versioned.Class{} + + // whole request fails if permissions for any collection are not present + for className, shards := range classesShards { + // we don't leak any info that someone who inserts data does not have anyway + vClass, err := b.schemaManager.GetCachedClassNoAuth(ctx, 
className) + if err != nil { + return nil, err + } + knownClasses[className] = vClass[className] + + if err := b.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.ShardsData(className, shards...)...); err != nil { + return nil, err + } + + if err := b.authorizer.Authorize(ctx, principal, authorization.CREATE, authorization.ShardsData(className, shards...)...); err != nil { + return nil, err + } + } + + return b.addObjects(ctx, principal, objects, repl, knownClasses) +} + +// AddObjectsGRPCAfterAuth bypasses the authentication in the REST endpoint as GRPC has its own checking +func (b *BatchManager) AddObjectsGRPCAfterAuth(ctx context.Context, principal *models.Principal, + objects []*models.Object, repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (BatchObjects, error) { + return b.addObjects(ctx, principal, objects, repl, fetchedClasses) +} + +func (b *BatchManager) addObjects(ctx context.Context, principal *models.Principal, + objects []*models.Object, repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (BatchObjects, error) { + ctx = classcache.ContextWithClassCache(ctx) + + before := time.Now() + b.metrics.BatchInc() + defer b.metrics.BatchOp("total_uc_level", before.UnixNano()) + defer b.metrics.BatchDec() + + beforePreProcessing := time.Now() + if len(objects) == 0 { + return nil, errEmptyObjects + } + + var maxSchemaVersion uint64 + batchObjects, maxSchemaVersion := b.validateAndGetVector(ctx, principal, objects, repl, fetchedClasses) + schemaVersion, tenantCount, err := b.autoSchemaManager.autoTenants(ctx, principal, objects, fetchedClasses) + if err != nil { + return nil, fmt.Errorf("auto create tenants: %w", err) + } + if schemaVersion > maxSchemaVersion { + maxSchemaVersion = schemaVersion + } + + b.metrics.BatchTenants(tenantCount) + b.metrics.BatchObjects(len(objects)) + b.metrics.BatchOp("total_preprocessing", beforePreProcessing.UnixNano()) + + var res 
BatchObjects + + beforePersistence := time.Now() + defer b.metrics.BatchOp("total_persistence_level", beforePersistence.UnixNano()) + + // Ensure that the local schema has caught up to the version we used to validate + if err := b.schemaManager.WaitForUpdate(ctx, maxSchemaVersion); err != nil { + return nil, fmt.Errorf("error waiting for local schema to catch up to version %d: %w", maxSchemaVersion, err) + } + if res, err = b.vectorRepo.BatchPutObjects(ctx, batchObjects, repl, maxSchemaVersion); err != nil { + return nil, NewErrInternal("batch objects: %#v", err) + } + + return res, nil +} + +func (b *BatchManager) validateAndGetVector(ctx context.Context, principal *models.Principal, + objects []*models.Object, repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (BatchObjects, uint64) { + var ( + now = time.Now().UnixNano() / int64(time.Millisecond) + batchObjects = make(BatchObjects, len(objects)) + + objectsPerClass = make(map[string][]*models.Object) + originalIndexPerClass = make(map[string][]int) + validator = validation.New(b.vectorRepo.Exists, b.config, repl) + ) + + // validate each object and sort by class (==vectorizer) + var maxSchemaVersion uint64 + for i, obj := range objects { + batchObjects[i].OriginalIndex = i + + if obj.Class == "" { + batchObjects[i].Err = errors.New("object has an empty class") + continue + } + + schemaVersion, err := b.autoSchemaManager.autoSchema(ctx, principal, true, fetchedClasses, obj) + if err != nil { + batchObjects[i].Err = err + } + if schemaVersion > maxSchemaVersion { + maxSchemaVersion = schemaVersion + } + + if obj.ID == "" { + // Generate UUID for the new object + uid, err := generateUUID() + obj.ID = uid + batchObjects[i].Err = err + } else { + if _, err := uuid.Parse(obj.ID.String()); err != nil { + batchObjects[i].Err = err + } + } + if obj.Properties == nil { + obj.Properties = map[string]interface{}{} + } + obj.CreationTimeUnix = now + obj.LastUpdateTimeUnix = now + 
batchObjects[i].Object = obj + batchObjects[i].UUID = obj.ID + if batchObjects[i].Err != nil { + continue + } + + if len(fetchedClasses) == 0 || fetchedClasses[obj.Class].Class == nil { + batchObjects[i].Err = fmt.Errorf("class '%v' not present in schema", obj.Class) + continue + } + class := fetchedClasses[obj.Class].Class + + if err := validator.Object(ctx, class, obj, nil); err != nil { + batchObjects[i].Err = err + continue + } + + if objectsPerClass[obj.Class] == nil { + objectsPerClass[obj.Class] = make([]*models.Object, 0) + originalIndexPerClass[obj.Class] = make([]int, 0) + } + objectsPerClass[obj.Class] = append(objectsPerClass[obj.Class], obj) + originalIndexPerClass[obj.Class] = append(originalIndexPerClass[obj.Class], i) + } + + for className, objectsForClass := range objectsPerClass { + class := fetchedClasses[className] + errorsPerObj, err := b.modulesProvider.BatchUpdateVector(ctx, class.Class, objectsForClass, b.findObject, b.logger) + if err != nil { + for i := range objectsForClass { + origIndex := originalIndexPerClass[className][i] + batchObjects[origIndex].Err = err + } + } + for i, err := range errorsPerObj { + origIndex := originalIndexPerClass[className][i] + batchObjects[origIndex].Err = err + } + } + + return batchObjects, maxSchemaVersion +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8db5ff83879a48b6999451900daa6515dc8028ec --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_add_test.go @@ -0,0 +1,503 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func Test_BatchManager_AddObjects_WithNoVectorizerModule(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *BatchManager + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Vectorizer: config.VectorizerModuleNone, + Class: "Foo", + VectorIndexConfig: hnsw.UserConfig{}, + }, + { + Vectorizer: config.VectorizerModuleNone, + Class: "FooSkipped", + VectorIndexConfig: hnsw.UserConfig{ + Skip: true, + }, + }, + }, + }, + } + + resetAutoSchema := func(autoSchema bool) { + vectorRepo = &fakeVectorRepo{} + config := &config.WeaviateConfig{ + Config: config.Config{ + AutoSchema: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(autoSchema), + }, + TrackVectorDimensions: true, + }, + } + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + logger, _ := test.NewNullLogger() + authorizer := mocks.NewMockAuthorizer() + modulesProvider = getFakeModulesProvider() + manager = NewBatchManager(vectorRepo, modulesProvider, schemaManager, config, logger, authorizer, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, config, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + reset := func() { + resetAutoSchema(false) + } + ctx := context.Background() + + t.Run("without any objects", 
func(t *testing.T) { + reset() + expectedErr := NewErrInvalidUserInput("invalid param 'objects': cannot be empty, need at least" + + " one object for batching") + + _, err := manager.AddObjects(ctx, nil, []*models.Object{}, []*string{}, nil) + + assert.Equal(t, expectedErr, err) + }) + + t.Run("with objects without IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + objects := []*models.Object{ + { + Class: "Foo", + Vector: []float32{0.1, 0.1, 0.1111}, + }, + { + Class: "Foo", + Vector: []float32{0.2, 0.2, 0.2222}, + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). + Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Len(t, repoCalledWithObjects[0].UUID, 36, + "a uuid was set for the first object") + assert.Len(t, repoCalledWithObjects[1].UUID, 36, + "a uuid was set for the second object") + assert.Nil(t, repoCalledWithObjects[0].Err) + assert.Nil(t, repoCalledWithObjects[1].Err) + assert.Equal(t, models.C11yVector{0.1, 0.1, 0.1111}, repoCalledWithObjects[0].Object.Vector, + "the correct vector was used") + assert.Equal(t, models.C11yVector{0.2, 0.2, 0.2222}, repoCalledWithObjects[1].Object.Vector, + "the correct vector was used") + }) + + t.Run("object without class", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + objects := []*models.Object{ + { + Class: "", + Vector: []float32{0.1, 0.1, 0.1111}, + }, + { + Class: "Foo", + Vector: []float32{0.2, 0.2, 0.2222}, + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). 
+ Return(nil, nil) + } + + resp, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + assert.Nil(t, err) + assert.NotNil(t, resp) + require.Len(t, repoCalledWithObjects, 2) + + require.NotNil(t, resp[0].Err) + require.Equal(t, resp[0].Err.Error(), "object has an empty class") + require.Nil(t, resp[1].Err) + }) + + t.Run("with objects without IDs and nonexistent class and auto schema enabled", func(t *testing.T) { + resetAutoSchema(true) + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + objects := []*models.Object{ + { + Class: "NonExistentFoo", + Vector: []float32{0.1, 0.1, 0.1111}, + }, + { + Class: "NonExistentFoo", + Vector: []float32{0.2, 0.2, 0.2222}, + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). + Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Len(t, repoCalledWithObjects[0].UUID, 36, + "a uuid was set for the first object") + assert.Len(t, repoCalledWithObjects[1].UUID, 36, + "a uuid was set for the second object") + assert.Nil(t, repoCalledWithObjects[0].Err) + assert.Nil(t, repoCalledWithObjects[1].Err) + assert.Equal(t, models.C11yVector{0.1, 0.1, 0.1111}, repoCalledWithObjects[0].Object.Vector, + "the correct vector was used") + assert.Equal(t, models.C11yVector{0.2, 0.2, 0.2222}, repoCalledWithObjects[1].Object.Vector, + "the correct vector was used") + }) + + t.Run("with user-specified IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + id1 := strfmt.UUID("2d3942c3-b412-4d80-9dfa-99a646629cd2") + id2 := strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff6") + objects := []*models.Object{ + { + ID: id1, + Class: "Foo", + Vector: []float32{0.1, 0.1, 0.1111}, + }, + { + ID: 
id2, + Class: "Foo", + Vector: []float32{0.2, 0.2, 0.2222}, + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). + Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Equal(t, id1, repoCalledWithObjects[0].UUID, "the user-specified uuid was used") + assert.Equal(t, id2, repoCalledWithObjects[1].UUID, "the user-specified uuid was used") + assert.Nil(t, repoCalledWithObjects[0].Err) + assert.Nil(t, repoCalledWithObjects[1].Err) + assert.Equal(t, models.C11yVector{0.1, 0.1, 0.1111}, repoCalledWithObjects[0].Object.Vector, + "the correct vector was used") + assert.Equal(t, models.C11yVector{0.2, 0.2, 0.2222}, repoCalledWithObjects[1].Object.Vector, + "the correct vector was used") + }) + + t.Run("with an invalid user-specified IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + id1 := strfmt.UUID("invalid") + id2 := strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff6") + objects := []*models.Object{ + { + ID: id1, + Class: "Foo", + Vector: []float32{0.1, 0.1, 0.1111}, + }, + { + ID: id2, + Class: "Foo", + Vector: []float32{0.2, 0.2, 0.2222}, + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). 
+ Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Equal(t, repoCalledWithObjects[0].Err.Error(), fmt.Sprintf("invalid UUID length: %d", len(id1))) + assert.Equal(t, id2, repoCalledWithObjects[1].UUID, "the user-specified uuid was used") + }) + + t.Run("without any vectors", func(t *testing.T) { + // prior to v1.10 this was the desired behavior: + // note that this should fail on class Foo, but be accepted on class + // FooSkipped + // + // However, since v1.10, it is acceptable to exclude a vector, even if + // indexing is not skipped. In this case only the individual element is + // skipped. See https://github.com/weaviate/weaviate/issues/1800 + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + objects := []*models.Object{ + { + Class: "Foo", + }, + { + Class: "FooSkipped", + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). 
+ Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Nil(t, repoCalledWithObjects[0].Err) + assert.Nil(t, repoCalledWithObjects[1].Err) + }) +} + +func Test_BatchManager_AddObjects_WithExternalVectorizerModule(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *BatchManager + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Vectorizer: config.VectorizerModuleText2VecContextionary, + VectorIndexConfig: hnsw.UserConfig{}, + Class: "Foo", + }, + }, + }, + } + + reset := func() { + vectorRepo = &fakeVectorRepo{} + config := &config.WeaviateConfig{} + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + logger, _ := test.NewNullLogger() + authorizer := mocks.NewMockAuthorizer() + modulesProvider = getFakeModulesProvider() + manager = NewBatchManager(vectorRepo, modulesProvider, schemaManager, config, logger, authorizer, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, config, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + ctx := context.Background() + + t.Run("without any objects", func(t *testing.T) { + reset() + expectedErr := NewErrInvalidUserInput("invalid param 'objects': cannot be empty, need at least" + + " one object for batching") + + _, err := manager.AddObjects(ctx, nil, []*models.Object{}, []*string{}, nil) + + assert.Equal(t, expectedErr, err) + }) + + t.Run("with objects without IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + expectedVector := models.C11yVector{0, 1, 2} + objects := []*models.Object{ + { + Class: "Foo", + }, + { + Class: "Foo", + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). 
+ Return(expectedVector, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Len(t, repoCalledWithObjects[0].UUID, 36, "a uuid was set for the first object") + assert.Len(t, repoCalledWithObjects[1].UUID, 36, "a uuid was set for the second object") + assert.Nil(t, repoCalledWithObjects[0].Err) + assert.Nil(t, repoCalledWithObjects[1].Err) + assert.Equal(t, expectedVector, repoCalledWithObjects[0].Object.Vector, + "the correct vector was used") + assert.Equal(t, expectedVector, repoCalledWithObjects[1].Object.Vector, + "the correct vector was used") + }) + + t.Run("with user-specified IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + id1 := strfmt.UUID("2d3942c3-b412-4d80-9dfa-99a646629cd2") + id2 := strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff6") + objects := []*models.Object{ + { + ID: id1, + Class: "Foo", + }, + { + ID: id2, + Class: "Foo", + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). 
+ Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Equal(t, id1, repoCalledWithObjects[0].UUID, "the user-specified uuid was used") + assert.Equal(t, id2, repoCalledWithObjects[1].UUID, "the user-specified uuid was used") + }) + + t.Run("with an invalid user-specified IDs", func(t *testing.T) { + reset() + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + id1 := strfmt.UUID("invalid") + id2 := strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff6") + objects := []*models.Object{ + { + ID: id1, + Class: "Foo", + }, + { + ID: id2, + Class: "Foo", + }, + } + + for range objects { + modulesProvider.On("BatchUpdateVector"). + Return(nil, nil) + } + + _, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + repoCalledWithObjects := vectorRepo.Calls[0].Arguments[0].(BatchObjects) + + assert.Nil(t, err) + require.Len(t, repoCalledWithObjects, 2) + assert.Equal(t, repoCalledWithObjects[0].Err.Error(), fmt.Sprintf("invalid UUID length: %d", len(id1))) + assert.Equal(t, id2, repoCalledWithObjects[1].UUID, "the user-specified uuid was used") + }) +} + +func Test_BatchManager_AddObjectsEmptyProperties(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *BatchManager + ) + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "TestClass", + VectorIndexConfig: hnsw.UserConfig{}, + + Properties: []*models.Property{ + { + Name: "strings", + DataType: schema.DataTypeTextArray.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + }, + }, + } + reset := func() { + vectorRepo = &fakeVectorRepo{} + vectorRepo.On("BatchPutObjects", mock.Anything).Return(nil).Once() + config := &config.WeaviateConfig{} + schemaManager := &fakeSchemaManager{ + 
GetSchemaResponse: schema, + } + logger, _ := test.NewNullLogger() + authorizer := mocks.NewMockAuthorizer() + modulesProvider = getFakeModulesProvider() + manager = NewBatchManager(vectorRepo, modulesProvider, schemaManager, config, logger, authorizer, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, config, authorizer, logger, prometheus.NewPedanticRegistry())) + } + reset() + objects := []*models.Object{ + { + ID: strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff6"), + Class: "TestClass", + }, + { + ID: strfmt.UUID("cf918366-3d3b-4b90-9bc6-bc5ea8762ff3"), + Class: "TestClass", + Properties: map[string]interface{}{ + "name": "testName", + }, + }, + } + require.Nil(t, objects[0].Properties) + require.NotNil(t, objects[1].Properties) + + ctx := context.Background() + for range objects { + modulesProvider.On("BatchUpdateVector"). + Return(nil, nil) + } + addedObjects, err := manager.AddObjects(ctx, nil, objects, []*string{}, nil) + assert.Nil(t, err) + require.Len(t, addedObjects, 2) + require.NotNil(t, addedObjects[0].Object.Properties) + require.NotNil(t, addedObjects[1].Object.Properties) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..fe9c1e155b0cee609a8a2249502823d553e0b4e5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete.go @@ -0,0 +1,175 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + + "github.com/weaviate/weaviate/adapters/handlers/rest/filterext" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/verbosity" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +// DeleteObjects deletes objects in batch based on the match filter +func (b *BatchManager) DeleteObjects(ctx context.Context, principal *models.Principal, + match *models.BatchDeleteMatch, deletionTimeUnixMilli *int64, dryRun *bool, output *string, + repl *additional.ReplicationProperties, tenant string, +) (*BatchDeleteResponse, error) { + class := "*" + if match != nil { + match.Class, _ = b.resolveAlias(match.Class) + class = match.Class + } + + err := b.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.ShardsData(class, tenant)...) 
+ if err != nil { + return nil, err + } + + ctx = classcache.ContextWithClassCache(ctx) + + b.metrics.BatchDeleteInc() + defer b.metrics.BatchDeleteDec() + + return b.deleteObjects(ctx, principal, match, deletionTimeUnixMilli, dryRun, output, repl, tenant) +} + +// DeleteObjectsFromGRPCAfterAuth deletes objects in batch based on the match filter +func (b *BatchManager) DeleteObjectsFromGRPCAfterAuth(ctx context.Context, principal *models.Principal, + params BatchDeleteParams, + repl *additional.ReplicationProperties, tenant string, +) (BatchDeleteResult, error) { + b.metrics.BatchDeleteInc() + defer b.metrics.BatchDeleteDec() + + deletionTime := time.UnixMilli(b.timeSource.Now()) + return b.vectorRepo.BatchDeleteObjects(ctx, params, deletionTime, repl, tenant, 0) +} + +func (b *BatchManager) deleteObjects(ctx context.Context, principal *models.Principal, + match *models.BatchDeleteMatch, deletionTimeUnixMilli *int64, dryRun *bool, output *string, + repl *additional.ReplicationProperties, tenant string, +) (*BatchDeleteResponse, error) { + params, schemaVersion, err := b.validateBatchDelete(ctx, principal, match, dryRun, output) + if err != nil { + return nil, errors.Wrap(err, "validate") + } + + // Ensure that the local schema has caught up to the version we used to validate + if err := b.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return nil, fmt.Errorf("error waiting for local schema to catch up to version %d: %w", schemaVersion, err) + } + var deletionTime time.Time + if deletionTimeUnixMilli != nil { + deletionTime = time.UnixMilli(*deletionTimeUnixMilli) + } + + result, err := b.vectorRepo.BatchDeleteObjects(ctx, *params, deletionTime, repl, tenant, schemaVersion) + if err != nil { + return nil, fmt.Errorf("batch delete objects: %w", err) + } + + return b.toResponse(match, params.Output, result) +} + +func (b *BatchManager) toResponse(match *models.BatchDeleteMatch, output string, + result BatchDeleteResult, +) (*BatchDeleteResponse, error) 
{ + response := &BatchDeleteResponse{ + Match: match, + Output: output, + DeletionTime: result.DeletionTime, + DryRun: result.DryRun, + Result: BatchDeleteResult{ + Matches: result.Matches, + Limit: result.Limit, + Objects: result.Objects, + }, + } + return response, nil +} + +func (b *BatchManager) validateBatchDelete(ctx context.Context, principal *models.Principal, + match *models.BatchDeleteMatch, dryRun *bool, output *string, +) (*BatchDeleteParams, uint64, error) { + if match == nil { + return nil, 0, errors.New("empty match clause") + } + + if len(match.Class) == 0 { + return nil, 0, errors.New("empty match.class clause") + } + + if match.Where == nil { + return nil, 0, errors.New("empty match.where clause") + } + + // Validate schema given in body with the weaviate schema + vclasses, err := b.schemaManager.GetCachedClass(ctx, principal, match.Class) + if err != nil { + return nil, 0, fmt.Errorf("failed to get class: %s: %w", match.Class, err) + } + if vclasses[match.Class].Class == nil { + return nil, 0, fmt.Errorf("failed to get class: %s", match.Class) + } + class := vclasses[match.Class].Class + + filter, err := filterext.Parse(match.Where, class.Class) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse where filter: %w", err) + } + + err = filters.ValidateFilters(b.classGetterFunc(ctx, principal), filter) + if err != nil { + return nil, 0, fmt.Errorf("invalid where filter: %w", err) + } + + dryRunParam := false + if dryRun != nil { + dryRunParam = *dryRun + } + + outputParam, err := verbosity.ParseOutput(output) + if err != nil { + return nil, 0, err + } + + params := &BatchDeleteParams{ + ClassName: schema.ClassName(class.Class), + Filters: filter, + DryRun: dryRunParam, + Output: outputParam, + } + return params, vclasses[match.Class].Version, nil +} + +func (b *BatchManager) classGetterFunc(ctx context.Context, principal *models.Principal) func(string) (*models.Class, error) { + return func(name string) (*models.Class, error) { + if err 
:= b.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Collections(name)...); err != nil { + return nil, err + } + class := b.schemaManager.ReadOnlyClass(name) + if class == nil { + return nil, fmt.Errorf("could not find class %s in schema", name) + } + return class, nil + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2779f2b197206b8c286ae71f229b134d1d13b4d3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_delete_test.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/entities/verbosity" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/config/runtime" +) + +func Test_BatchDelete_RequestValidation(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + manager *BatchManager + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Foo", + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + VectorIndexConfig: hnsw.UserConfig{}, + Vectorizer: 
config.VectorizerModuleNone, + }, + }, + }, + } + + resetAutoSchema := func(autoSchema bool) { + vectorRepo = &fakeVectorRepo{} + config := &config.WeaviateConfig{ + Config: config.Config{ + AutoSchema: config.AutoSchema{ + Enabled: runtime.NewDynamicValue(autoSchema), + }, + }, + } + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + logger, _ := test.NewNullLogger() + authorizer := mocks.NewMockAuthorizer() + modulesProvider := getFakeModulesProvider() + manager = NewBatchManager(vectorRepo, modulesProvider, schemaManager, config, logger, authorizer, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, config, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + reset := func() { + resetAutoSchema(false) + } + ctx := context.Background() + + reset() + + t.Run("with invalid input", func(t *testing.T) { + tests := []struct { + input *models.BatchDelete + expectedError string + }{ + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + Match: &models.BatchDeleteMatch{ + Class: "SomeClass", + Where: &models.WhereFilter{ + Path: []string{"some", "path"}, + Operator: "Equal", + ValueText: ptString("value"), + }, + }, + }, + expectedError: "validate: failed to get class: SomeClass", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + Match: &models.BatchDeleteMatch{ + Class: "Foo", + Where: &models.WhereFilter{ + Path: []string{"some"}, + Operator: "Equal", + ValueText: ptString("value"), + }, + }, + }, + expectedError: "validate: invalid where filter: no such prop with name 'some' found in class 'Foo' " + + "in the schema. 
Check your schema files for which properties in this class are available", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + }, + expectedError: "validate: empty match clause", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + Match: &models.BatchDeleteMatch{ + Class: "", + }, + }, + expectedError: "validate: empty match.class clause", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + Match: &models.BatchDeleteMatch{ + Class: "Foo", + }, + }, + expectedError: "validate: empty match.where clause", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString(verbosity.OutputVerbose), + Match: &models.BatchDeleteMatch{ + Class: "Foo", + Where: &models.WhereFilter{ + Path: []string{}, + Operator: "Equal", + ValueText: ptString("name"), + }, + }, + }, + expectedError: "validate: failed to parse where filter: invalid where filter: field 'path': must have at least one element", + }, + { + input: &models.BatchDelete{ + DryRun: ptBool(false), + Output: ptString("Simplified Chinese"), + Match: &models.BatchDeleteMatch{ + Class: "Foo", + Where: &models.WhereFilter{ + Path: []string{"name"}, + Operator: "Equal", + ValueText: ptString("value"), + }, + }, + }, + expectedError: "validate: invalid output: \"Simplified Chinese\", possible values are: \"minimal\", \"verbose\"", + }, + } + + for _, test := range tests { + _, err := manager.DeleteObjects(ctx, nil, test.input.Match, test.input.DeletionTimeUnixMilli, test.input.DryRun, test.input.Output, nil, "") + assert.Equal(t, test.expectedError, err.Error()) + } + }) +} + +func ptBool(b bool) *bool { + return &b +} + +func ptString(s string) *string { + return &s +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_manager.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_manager.go new file 
mode 100644 index 0000000000000000000000000000000000000000..ebebf3a736da69a0bed6aa45364e78d5e6fa0f29 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_manager.go @@ -0,0 +1,77 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "time" + + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/monitoring" + "github.com/weaviate/weaviate/usecases/objects/alias" +) + +// BatchManager manages kind changes in batch at a use-case level , i.e. +// agnostic of underlying databases or storage providers +type BatchManager struct { + config *config.WeaviateConfig + schemaManager schemaManager + logger logrus.FieldLogger + authorizer authorization.Authorizer + vectorRepo BatchVectorRepo + timeSource timeSource + modulesProvider ModulesProvider + autoSchemaManager *AutoSchemaManager + metrics *Metrics +} + +type BatchVectorRepo interface { + VectorRepo + batchRepoNew +} + +type batchRepoNew interface { + BatchPutObjects(ctx context.Context, objects BatchObjects, + repl *additional.ReplicationProperties, schemaVersion uint64) (BatchObjects, error) + BatchDeleteObjects(ctx context.Context, params BatchDeleteParams, deletionTime time.Time, + repl *additional.ReplicationProperties, tenant string, schemaVersion uint64) (BatchDeleteResult, error) + AddBatchReferences(ctx context.Context, references BatchReferences, + repl *additional.ReplicationProperties, schemaVersion uint64) (BatchReferences, error) +} + +// NewBatchManager creates a new manager +func NewBatchManager(vectorRepo BatchVectorRepo, 
modulesProvider ModulesProvider, + schemaManager schemaManager, config *config.WeaviateConfig, + logger logrus.FieldLogger, authorizer authorization.Authorizer, + prom *monitoring.PrometheusMetrics, autoSchemaManager *AutoSchemaManager, +) *BatchManager { + return &BatchManager{ + config: config, + schemaManager: schemaManager, + logger: logger, + vectorRepo: vectorRepo, + timeSource: defaultTimeSource{}, + modulesProvider: modulesProvider, + authorizer: authorizer, + autoSchemaManager: autoSchemaManager, + metrics: NewMetrics(prom), + } +} + +// Alias support +func (m *BatchManager) resolveAlias(class string) (className, aliasName string) { + return alias.ResolveAlias(m.schemaManager, class) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_references_add.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_references_add.go new file mode 100644 index 0000000000000000000000000000000000000000..c451e48a1b77d7af1276399aee01564c1e369b1a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_references_add.go @@ -0,0 +1,380 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +// AddReferences Class Instances in batch to the connected DB +func (b *BatchManager) AddReferences(ctx context.Context, principal *models.Principal, + refs []*models.BatchReference, repl *additional.ReplicationProperties, +) (BatchReferences, error) { + // only validates form of input, no schema access + if err := validateReferenceForm(refs); err != nil { + return nil, NewErrInvalidUserInput("invalid params: %v", err) + } + + ctx = classcache.ContextWithClassCache(ctx) + + batchReferences := validateReferencesConcurrently(ctx, refs, b.logger) + + uniqueClass := map[string]struct{}{} + type classAndShard struct { + Class string + Shard string + } + uniqueClassShard := map[string]classAndShard{} + for idx := range batchReferences { + if batchReferences[idx].Err != nil { + continue + } + class := batchReferences[idx].From.Class.String() + uniqueClass[class] = struct{}{} + uniqueClassShard[class+"#"+batchReferences[idx].Tenant] = classAndShard{Class: class, Shard: batchReferences[idx].Tenant} + } + + allClasses := make([]string, 0, len(uniqueClass)) + for classname := range uniqueClass { + allClasses = append(allClasses, classname) + } + fetchedClasses, err := b.schemaManager.GetCachedClass(ctx, principal, allClasses...) 
+ if err != nil { + return nil, err + } + + var pathsData []string + for _, val := range uniqueClassShard { + pathsData = append(pathsData, authorization.ShardsData(val.Class, val.Shard)...) + } + + if err := b.authorizer.Authorize(ctx, principal, authorization.UPDATE, pathsData...); err != nil { + return nil, err + } + + b.metrics.BatchRefInc() + defer b.metrics.BatchRefDec() + + return b.addReferences(ctx, principal, batchReferences, repl, fetchedClasses) +} + +func (b *BatchManager) addReferences(ctx context.Context, principal *models.Principal, + refs BatchReferences, repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (BatchReferences, error) { + if err := b.autodetectToClass(refs, fetchedClasses); err != nil { + return nil, err + } + + // MT validation must be done after auto-detection as we cannot know the target class beforehand in all cases + type classAndShard struct { + Class string + Shard string + } + uniqueClassShard := map[string]classAndShard{} + var schemaVersion uint64 + for i, ref := range refs { + if ref.Err != nil { + continue + } + + if shouldValidateMultiTenantRef(ref.Tenant, ref.From, ref.To) { + // can only validate multi-tenancy when everything above succeeds + classVersion, err := validateReferenceMultiTenancy(ctx, principal, b.schemaManager, b.vectorRepo, ref.From, ref.To, ref.Tenant, fetchedClasses) + if err != nil { + refs[i].Err = err + } + if classVersion > schemaVersion { + schemaVersion = classVersion + } + } + + uniqueClassShard[ref.To.Class+"#"+ref.Tenant] = classAndShard{Class: ref.To.Class, Shard: ref.Tenant} + } + + shardsDataPaths := make([]string, 0, len(uniqueClassShard)) + for _, val := range uniqueClassShard { + shardsDataPaths = append(shardsDataPaths, authorization.ShardsData(val.Class, val.Shard)...) 
+ } + + // target object is checked for existence - this is currently ONLY done with tenants enabled, but we should require + // the permission for everything, to not complicate things too much + if err := b.authorizer.Authorize(ctx, principal, authorization.READ, shardsDataPaths...); err != nil { + return nil, err + } + + // Ensure that the local schema has caught up to the version we used to validate + if err := b.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return nil, fmt.Errorf("error waiting for local schema to catch up to version %d: %w", schemaVersion, err) + } + if res, err := b.vectorRepo.AddBatchReferences(ctx, refs, repl, schemaVersion); err != nil { + return nil, NewErrInternal("could not add batch request to connector: %v", err) + } else { + return res, nil + } +} + +func validateReferenceForm(refs []*models.BatchReference) error { + if len(refs) == 0 { + return fmt.Errorf("length cannot be 0, need at least one reference for batching") + } + + return nil +} + +func validateReferencesConcurrently(ctx context.Context, refs []*models.BatchReference, logger logrus.FieldLogger) BatchReferences { + c := make(chan BatchReference, len(refs)) + wg := new(sync.WaitGroup) + + // Generate a goroutine for each separate request + for i, ref := range refs { + i := i + ref := ref + wg.Add(1) + enterrors.GoWrapper(func() { validateReference(ctx, wg, ref, i, &c) }, logger) + } + + wg.Wait() + close(c) + + return referencesChanToSlice(c) +} + +// autodetectToClass gets the class name of the referenced class through the schema definition +func (b *BatchManager) autodetectToClass(batchReferences BatchReferences, fetchedClasses map[string]versioned.Class) error { + classPropTarget := make(map[string]string, len(batchReferences)) + for i, ref := range batchReferences { + // get to class from property datatype + if ref.To.Class != "" || ref.Err != nil { + continue + } + className := string(ref.From.Class) + propName := 
schema.LowercaseFirstLetter(string(ref.From.Property)) + + target, ok := classPropTarget[className+propName] + if !ok { + class := fetchedClasses[className] + if class.Class == nil { + batchReferences[i].Err = fmt.Errorf("source class %q not found in schema", className) + continue + } + + prop, err := schema.GetPropertyByName(class.Class, propName) + if err != nil { + batchReferences[i].Err = fmt.Errorf("property %s does not exist for class %s", propName, className) + continue + } + if len(prop.DataType) > 1 { + continue // can't auto-detect for multi-target + } + target = prop.DataType[0] // datatype is the name of the class that is referenced + classPropTarget[className+propName] = target + } + batchReferences[i].To.Class = target + } + return nil +} + +func validateReference(ctx context.Context, + wg *sync.WaitGroup, ref *models.BatchReference, i int, resultsC *chan BatchReference, +) { + defer wg.Done() + var validateErrors []error + source, err := crossref.ParseSource(string(ref.From)) + if err != nil { + validateErrors = append(validateErrors, err) + } else if !source.Local { + validateErrors = append(validateErrors, fmt.Errorf("source class must always point to the local peer, but got %s", + source.PeerName)) + } + + target, err := crossref.Parse(string(ref.To)) + if err != nil { + validateErrors = append(validateErrors, err) + } else if !target.Local { + validateErrors = append(validateErrors, fmt.Errorf("importing network references in batch is not possible. 
"+ + "Please perform a regular non-batch import for network references, got peer %s", + target.PeerName)) + } + + // target id must be lowercase + if target != nil { + target.TargetID = strfmt.UUID(strings.ToLower(target.TargetID.String())) + } + + if len(validateErrors) == 0 { + err = nil + } else { + err = joinErrors(validateErrors) + } + + *resultsC <- BatchReference{ + From: source, + To: target, + Err: err, + OriginalIndex: i, + Tenant: ref.Tenant, + } +} + +func validateReferenceMultiTenancy(ctx context.Context, + principal *models.Principal, schemaManager schemaManager, + repo VectorRepo, source *crossref.RefSource, target *crossref.Ref, + tenant string, fetchedClasses map[string]versioned.Class, +) (uint64, error) { + if source == nil || target == nil { + return 0, fmt.Errorf("can't validate multi-tenancy for nil refs") + } + + sourceClass, targetClass, schemaVersion, err := getReferenceClasses( + ctx, principal, schemaManager, source.Class.String(), source.Property.String(), target.Class, fetchedClasses) + if err != nil { + return 0, err + } + + sourceEnabled := schema.MultiTenancyEnabled(sourceClass) + targetEnabled := schema.MultiTenancyEnabled(targetClass) + + if !sourceEnabled && targetEnabled { + return 0, fmt.Errorf("invalid reference: cannot reference a multi-tenant " + + "enabled class from a non multi-tenant enabled class") + } + if sourceEnabled && !targetEnabled { + if err := validateTenantRefObject(ctx, repo, sourceClass, source.TargetID, tenant); err != nil { + return 0, fmt.Errorf("source: %w", err) + } + if err := validateTenantRefObject(ctx, repo, targetClass, target.TargetID, ""); err != nil { + return 0, fmt.Errorf("target: %w", err) + } + } + // if both classes have MT enabled but different tenant keys, + // no cross-tenant references can be made + if sourceEnabled && targetEnabled { + if err := validateTenantRefObject(ctx, repo, sourceClass, source.TargetID, tenant); err != nil { + return 0, fmt.Errorf("source: %w", err) + } + if err := 
validateTenantRefObject(ctx, repo, targetClass, target.TargetID, tenant); err != nil {
+			return 0, fmt.Errorf("target: %w", err)
+		}
+	}
+
+	return schemaVersion, nil
+}
+
+// getReferenceClasses resolves the source and target classes of a reference
+// from the already-fetched class map (lazily fetching the target class when it
+// is missing) and returns the schema version of the source class.
+func getReferenceClasses(ctx context.Context,
+	principal *models.Principal, schemaManager schemaManager,
+	classFrom, fromProperty, toClassName string, fetchedClasses map[string]versioned.Class,
+) (sourceClass *models.Class, targetClass *models.Class, schemaVersion uint64, err error) {
+	if classFrom == "" {
+		err = fmt.Errorf("references involving a multi-tenancy enabled class " +
+			"requires class name in the source beacon url")
+		return
+	}
+
+	fromClass := fetchedClasses[classFrom]
+	if fromClass.Class == nil {
+		err = fmt.Errorf("source class %q not found in schema", classFrom)
+		return
+	}
+
+	sourceClass = fromClass.Class
+	schemaVersion = fromClass.Version
+
+	// we can auto-detect the to class from the schema if it is a single target reference
+	if toClassName == "" {
+		refProp, err2 := schema.GetPropertyByName(sourceClass, fromProperty)
+		if err2 != nil {
+			err = fmt.Errorf("get source refprop %q: %w", classFrom, err2)
+			return
+		}
+
+		if len(refProp.DataType) != 1 {
+			err = fmt.Errorf("multi-target references require the class name in the target beacon url")
+			return
+		}
+		toClassName = refProp.DataType[0]
+	}
+
+	toClass, ok := fetchedClasses[toClassName]
+	if !ok {
+		targetVclasses, err2 := schemaManager.GetCachedClass(ctx, principal, toClassName)
+		if err2 != nil {
+			err = fmt.Errorf("get target class %q: %w", toClassName, err2)
+			return
+		}
+		toClass = targetVclasses[toClassName]
+		fetchedClasses[toClassName] = toClass
+	}
+	if toClass.Class == nil {
+		// fix: report the missing *target* class name, not the source class
+		err = fmt.Errorf("target class %q not found in schema", toClassName)
+		return
+	}
+	targetClass = toClass.Class
+
+	return
+}
+
+// validateTenantRefObject ensures that object exist for the given tenant key.
+// This asserts that no cross-tenant references can occur, +// as a class+id which belongs to a different +// tenant will not be found in the searched tenant shard +func validateTenantRefObject(ctx context.Context, repo VectorRepo, + class *models.Class, ID strfmt.UUID, tenant string, +) error { + exists, err := repo.Exists(ctx, class.Class, ID, nil, tenant) + if err != nil { + return fmt.Errorf("get object %s/%s: %w", class.Class, ID, err) + } + if !exists { + return fmt.Errorf("object %s/%s not found for tenant %q", class.Class, ID, tenant) + } + return nil +} + +func referencesChanToSlice(c chan BatchReference) BatchReferences { + result := make([]BatchReference, len(c)) + for reference := range c { + result[reference.OriginalIndex] = reference + } + + return result +} + +func joinErrors(errors []error) error { + errorStrings := []string{} + for _, err := range errors { + if err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) == 0 { + return nil + } + + return fmt.Errorf("%s", strings.Join(errorStrings, ", ")) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/batch_types.go b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_types.go new file mode 100644 index 0000000000000000000000000000000000000000..e1f35a7e659e3b18afdeae75fd539fae41df3be7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/batch_types.go @@ -0,0 +1,97 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "time" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" +) + +// BatchObject is a helper type that groups all the info about one object in a +// batch that belongs together, i.e. uuid, object body and error state. +// +// Consumers of an Object (i.e. database connector) should always check +// whether an error is already present by the time they receive a batch object. +// Errors can be introduced at all levels, e.g. validation. +// +// However, error'd objects are not removed to make sure that the list in +// Objects matches the order and content of the incoming batch request +type BatchObject struct { + OriginalIndex int + Err error + Object *models.Object + UUID strfmt.UUID +} + +// BatchObjects groups many Object items together. The order matches the +// order from the original request. It can be turned into the expected response +// type using the .Response() method +type BatchObjects []BatchObject + +// BatchReference is a helper type that groups all the info about one references in a +// batch that belongs together, i.e. from, to, original index and error state +// +// Consumers of an Object (i.e. database connector) should always check +// whether an error is already present by the time they receive a batch object. +// Errors can be introduced at all levels, e.g. validation. +// +// However, error'd objects are not removed to make sure that the list in +// Objects matches the order and content of the incoming batch request +type BatchReference struct { + OriginalIndex int `json:"originalIndex"` + Err error `json:"err"` + From *crossref.RefSource `json:"from"` + To *crossref.Ref `json:"to"` + Tenant string `json:"tenant"` +} + +// BatchReferences groups many Reference items together. 
The order matches the +// order from the original request. It can be turned into the expected response +// type using the .Response() method +type BatchReferences []BatchReference + +type BatchSimpleObject struct { + UUID strfmt.UUID + Err error +} + +type BatchSimpleObjects []BatchSimpleObject + +type BatchDeleteParams struct { + ClassName schema.ClassName `json:"className"` + Filters *filters.LocalFilter `json:"filters"` + DeletionTime time.Time + DryRun bool + Output string +} + +type BatchDeleteResult struct { + Matches int64 + Limit int64 + DeletionTime time.Time + DryRun bool + Objects BatchSimpleObjects +} + +type BatchDeleteResponse struct { + Match *models.BatchDeleteMatch + DeletionTime time.Time + DryRun bool + Output string + Params BatchDeleteParams + Result BatchDeleteResult +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/delete.go b/platform/dbops/binaries/weaviate-src/usecases/objects/delete.go new file mode 100644 index 0000000000000000000000000000000000000000..740772a24c524a12a21c7fde03545b0ce54f7d14 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/delete.go @@ -0,0 +1,118 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/schema" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzerrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +// DeleteObject Class Instance from the connected DB +// +// if class == "" it will delete all object with same id regardless of the class name. +// This is due to backward compatibility reasons and should be removed in the future +func (m *Manager) DeleteObject(ctx context.Context, + principal *models.Principal, className string, id strfmt.UUID, + repl *additional.ReplicationProperties, tenant string, +) error { + className = schema.UppercaseClassName(className) + className, _ = m.resolveAlias(className) + + err := m.authorizer.Authorize(ctx, principal, authorization.DELETE, authorization.Objects(className, tenant, id)) + if err != nil { + return err + } + ctx = classcache.ContextWithClassCache(ctx) + + if err := m.allocChecker.CheckAlloc(memwatch.EstimateObjectDeleteMemory()); err != nil { + m.logger.WithError(err).Errorf("memory pressure: cannot process delete object") + return fmt.Errorf("cannot process delete object: %w", err) + } + + m.metrics.DeleteObjectInc() + defer m.metrics.DeleteObjectDec() + + if className == "" { // deprecated + return m.deleteObjectFromRepo(ctx, id, time.UnixMilli(m.timeSource.Now())) + } + + // we only use the schemaVersion in this endpoint + fetchedClasses, err := m.schemaManager.GetCachedClassNoAuth(ctx, className) + if err != nil { + return fmt.Errorf("could not get class %s: %w", className, err) + } + + // Ensure that the local schema has caught up to the version we used to validate + if 
err := m.schemaManager.WaitForUpdate(ctx, fetchedClasses[className].Version); err != nil { + return fmt.Errorf("error waiting for local schema to catch up to version %d: %w", fetchedClasses[className].Version, err) + } + if err = m.vectorRepo.DeleteObject(ctx, className, id, time.UnixMilli(m.timeSource.Now()), repl, tenant, fetchedClasses[className].Version); err != nil { + var e1 ErrMultiTenancy + if errors.As(err, &e1) { + return NewErrMultiTenancy(fmt.Errorf("delete object from vector repo: %w", err)) + } + var e2 ErrInvalidUserInput + if errors.As(err, &e2) { + return NewErrMultiTenancy(fmt.Errorf("delete object from vector repo: %w", err)) + } + var e3 authzerrs.Forbidden + if errors.As(err, &e3) { + return fmt.Errorf("delete object from vector repo: %w", err) + } + return NewErrInternal("could not delete object from vector repo: %v", err) + } + + return nil +} + +// deleteObjectFromRepo deletes objects with same id and different classes. +// +// Deprecated +func (m *Manager) deleteObjectFromRepo(ctx context.Context, id strfmt.UUID, deletionTime time.Time) error { + // There might be a situation to have UUIDs which are not unique across classes. + // Added loop in order to delete all of the objects with given UUID across all classes. 
+ // This change is added in response to this issue: + // https://github.com/weaviate/weaviate/issues/1836 + deleteCounter := 0 + for { + objectRes, err := m.getObjectFromRepo(ctx, "", id, additional.Properties{}, nil, "") + if err != nil { + if errors.As(err, &ErrNotFound{}) { + if deleteCounter == 0 { + return err + } + return nil + } + return err + } + + object := objectRes.Object() + err = m.vectorRepo.DeleteObject(ctx, object.Class, id, deletionTime, nil, "", 0) + if err != nil { + return NewErrInternal("could not delete object from vector repo: %v", err) + } + deleteCounter++ + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/delete_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/delete_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3487a1d5451ef9d80e38dc2968599a6e9f239c0c --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/delete_test.go @@ -0,0 +1,113 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" +) + +func Test_DeleteObjectsWithSameId(t *testing.T) { + var ( + cls = "MyClass" + id = strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + ) + + manager, vectorRepo, _, _ := newDeleteDependency() + vectorRepo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything).Return(&search.Result{ + ClassName: cls, + }, nil).Once() + vectorRepo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil).Once() + vectorRepo.On("DeleteObject", cls, id, mock.Anything).Return(nil).Once() + + err := manager.DeleteObject(context.Background(), nil, "", id, nil, "") + assert.Nil(t, err) + vectorRepo.AssertExpectations(t) +} + +func Test_DeleteObject(t *testing.T) { + var ( + cls = "MyClass" + id = strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + errNotFound = errors.New("object not found") + ) + + manager, repo, _, _ := newDeleteDependency() + + repo.On("DeleteObject", cls, id, mock.Anything).Return(nil).Once() + err := manager.DeleteObject(context.Background(), nil, cls, id, nil, "") + assert.Nil(t, err) + repo.AssertExpectations(t) + + // return internal error if deleteObject() fails + repo.On("DeleteObject", cls, id, mock.Anything).Return(errNotFound).Once() + err = manager.DeleteObject(context.Background(), nil, cls, id, nil, "") + if !errors.As(err, &ErrInternal{}) { + t.Errorf("error type got: %T want: ErrInternal", err) + } + repo.AssertExpectations(t) +} + +// TestDeleteObject_RbacResolveAlias is to make sure alias is resolved to 
correct +// collection before doing RBAC check on original class. +func TestDeleteObject_RbacResolveAlias(t *testing.T) { + manager, repo, auth, schema := newDeleteDependency() + + var ( + class = "SomeClass" + alias = "SomeAlias" + id = strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + ctx = context.Background() + ) + + repo.On("DeleteObject", class, id, mock.Anything).Return(nil).Once() + + // we mock `resolveAlias`. + // And we make sure the "resource" name we got in rbac's Authorize is + // what returned from `resolveAlias`, so that we can confirm, resolveAlias + // always happens before rbac authorize. + schema.resolveAliasTo = class + + err := manager.DeleteObject(ctx, nil, alias, id, nil, "") + require.NoError(t, err) + assert.Len(t, auth.Calls(), 1) + assert.Contains(t, auth.Calls()[0].Resources[0], class) // make sure rbac is called with "resolved class" name +} + +func newDeleteDependency() (*Manager, *fakeVectorRepo, *mocks.FakeAuthorizer, *fakeSchemaManager) { + vectorRepo := new(fakeVectorRepo) + logger, _ := test.NewNullLogger() + authorizer := mocks.NewMockAuthorizer() + smanager := new(fakeSchemaManager) + manager := NewManager( + smanager, + new(config.WeaviateConfig), + logger, + authorizer, + vectorRepo, + getFakeModulesProvider(), + new(fakeMetrics), nil, + NewAutoSchemaManager(new(fakeSchemaManager), vectorRepo, new(config.WeaviateConfig), mocks.NewMockAuthorizer(), logger, prometheus.NewPedanticRegistry())) + return manager, vectorRepo, authorizer, smanager +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/errors.go b/platform/dbops/binaries/weaviate-src/usecases/objects/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..a628101d77ae30019b000083782b6f48a4ea6d43 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/errors.go @@ -0,0 +1,174 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || 
__/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "fmt" +) + +// objects status code +const ( + StatusForbidden = 403 + StatusBadRequest = 400 + StatusNotFound = 404 + StatusUnprocessableEntity = 422 + StatusInternalServerError = 500 +) + +type Error struct { + Msg string + Code int + Err error +} + +// Error implements error interface +func (e *Error) Error() string { + return fmt.Sprintf("msg:%s code:%v err:%v", e.Msg, e.Code, e.Err) +} + +// Unwrap underlying error +func (e *Error) Unwrap() error { + return e.Err +} + +func (e *Error) NotFound() bool { + return e.Code == StatusNotFound +} + +func (e *Error) Forbidden() bool { + return e.Code == StatusForbidden +} + +func (e *Error) BadRequest() bool { + return e.Code == StatusBadRequest +} + +func (e *Error) UnprocessableEntity() bool { + return e.Code == StatusUnprocessableEntity +} + +// ErrInvalidUserInput indicates a client-side error +type ErrInvalidUserInput struct { + msg string +} + +func (e ErrInvalidUserInput) Error() string { + return e.msg +} + +// NewErrInvalidUserInput with Errorf signature +func NewErrInvalidUserInput(format string, args ...interface{}) ErrInvalidUserInput { + return ErrInvalidUserInput{msg: fmt.Sprintf(format, args...)} +} + +// ErrInternal indicates something went wrong during processing +type ErrInternal struct { + msg string +} + +func (e ErrInternal) Error() string { + return e.msg +} + +// NewErrInternal with Errorf signature +func NewErrInternal(format string, args ...interface{}) ErrInternal { + return ErrInternal{msg: fmt.Sprintf(format, args...)} +} + +// ErrNotFound indicates the desired resource doesn't exist +type ErrNotFound struct { + msg string +} + +func (e ErrNotFound) Error() string { + return e.msg +} + +// NewErrNotFound with Errorf signature +func NewErrNotFound(format string, args ...interface{}) ErrNotFound { + return 
ErrNotFound{msg: fmt.Sprintf(format, args...)} +} + +type ErrMultiTenancy struct { + err error +} + +func (e ErrMultiTenancy) Error() string { + return e.err.Error() +} + +func (e ErrMultiTenancy) Unwrap() error { + return e.err +} + +// NewErrMultiTenancy with error signature +func NewErrMultiTenancy(err error) ErrMultiTenancy { + return ErrMultiTenancy{err} +} + +// This error is thrown by the replication logic when an object has either: +// +// 1. been deleted locally but exists remotely +// +// 2. been deleted remotely but exists locally +// +// signifying that the current operation is happening simultaneously to another operation +// on the same replicated resource. +// +// This error is used to bubble up the error from the replication logic so that it can be handled +// depending on the context of the higher level operation. +// +// This was introduced originally to handle +// cases where concurrent delete_many and single_patch operations were happening on the same object +// across multiple replicas. The read repair of the patch method would fail with a 500 conflict error +// if the delete operation was not propagated to all replicas before the patch operation was attempted. +// By using this error and handling it in func (m *Manager) MergeObject, any patch updates will assume that +// the object has been deleted everywhere, despite it only being deleted in one place, and will therefore +// return a 404 not found error. 
+type ErrDirtyReadOfDeletedObject struct { + err error +} + +func (e ErrDirtyReadOfDeletedObject) Error() string { + return e.err.Error() +} + +func (e ErrDirtyReadOfDeletedObject) Unwrap() error { + return e.err +} + +// It depends on the order of operations +// +// Created -> Deleted => It is safe in this case to propagate deletion to all replicas +// Created -> Deleted -> Created => propagating deletion will result in data lost +// +// Updated -> Deleted => It is safe in this case to propagate deletion to all replicas +// Updated -> Deleted -> Updated => It is also safe in this case since updating a deleted object makes no logical sense +func NewErrDirtyReadOfDeletedObject(err error) ErrDirtyReadOfDeletedObject { + return ErrDirtyReadOfDeletedObject{err} +} + +type ErrDirtyWriteOfDeletedObject struct { + err error +} + +func (e ErrDirtyWriteOfDeletedObject) Error() string { + return e.err.Error() +} + +func (e ErrDirtyWriteOfDeletedObject) Unwrap() error { + return e.err +} + +func NewErrDirtyWriteOfDeletedObject(err error) ErrDirtyWriteOfDeletedObject { + return ErrDirtyWriteOfDeletedObject{err} +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/fakes_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..07cdf76c205654a9d4fd2575a3849e7539f793a4 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/fakes_for_test.go @@ -0,0 +1,887 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/mock" + "github.com/tailor-inc/graphql" + "github.com/tailor-inc/graphql/language/ast" + + "github.com/weaviate/weaviate/adapters/handlers/graphql/descriptions" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/moduletools" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/entities/versioned" +) + +const FindObjectFn = "func(context.Context, string, strfmt.UUID, " + + "search.SelectProperties, additional.Properties, string) (*search.Result, error)" + +type fakeSchemaManager struct { + CalledWith struct { + fromClass string + property string + toClass string + } + GetSchemaResponse schema.Schema + GetschemaErr error + tenantsEnabled bool + resolveAliasTo string +} + +func (f *fakeSchemaManager) UpdatePropertyAddDataType(ctx context.Context, principal *models.Principal, + fromClass, property, toClass string, +) error { + f.CalledWith = struct { + fromClass string + property string + toClass string + }{ + fromClass: fromClass, + property: property, + toClass: toClass, + } + return nil +} + +func (f *fakeSchemaManager) GetSchema(principal *models.Principal) (schema.Schema, error) { + return f.GetSchemaResponse, f.GetschemaErr +} + +func (f *fakeSchemaManager) GetConsistentSchema(ctx context.Context, principal *models.Principal, consistency bool) (schema.Schema, error) { + return f.GetSchema(principal) 
+} + +func (f *fakeSchemaManager) ShardOwner(class, shard string) (string, error) { return "", nil } + +func (f *fakeSchemaManager) ShardFromUUID(class string, uuid []byte) string { return "" } + +func (f *fakeSchemaManager) GetClass(ctx context.Context, principal *models.Principal, + name string, +) (*models.Class, error) { + if f.GetSchemaResponse.Objects == nil { + return nil, f.GetschemaErr + } + for _, class := range f.GetSchemaResponse.Objects.Classes { + if class.Class == name { + return class, f.GetschemaErr + } + } + return nil, f.GetschemaErr +} + +func (f *fakeSchemaManager) GetConsistentClass(ctx context.Context, principal *models.Principal, + name string, consistency bool, +) (*models.Class, uint64, error) { + cls, err := f.GetClass(ctx, principal, name) + return cls, 0, err +} + +func (f *fakeSchemaManager) GetCachedClass(ctx context.Context, + principal *models.Principal, names ...string, +) (map[string]versioned.Class, error) { + res := map[string]versioned.Class{} + for _, name := range names { + cls, err := f.GetClass(ctx, principal, name) + if err != nil { + return res, err + } + res[name] = versioned.Class{Class: cls} + } + return res, nil +} + +func (f *fakeSchemaManager) GetCachedClassNoAuth(ctx context.Context, names ...string, +) (map[string]versioned.Class, error) { + res := map[string]versioned.Class{} + for _, name := range names { + cls, err := f.GetClass(ctx, nil, name) + if err != nil { + return res, err + } + res[name] = versioned.Class{Class: cls} + } + return f.GetCachedClass(ctx, nil, names...) 
+} + +func (f *fakeSchemaManager) ReadOnlyClass(name string) *models.Class { + c, err := f.GetClass(context.TODO(), nil, name) + if err != nil { + return nil + } + return c +} + +func (f *fakeSchemaManager) AddClass(ctx context.Context, principal *models.Principal, + class *models.Class, +) (*models.Class, uint64, error) { + if f.GetSchemaResponse.Objects == nil { + f.GetSchemaResponse.Objects = schema.Empty().Objects + } + class.VectorIndexConfig = hnsw.UserConfig{} + class.VectorIndexType = "hnsw" + class.Vectorizer = "none" + classes := f.GetSchemaResponse.Objects.Classes + if classes != nil { + classes = append(classes, class) + } else { + classes = []*models.Class{class} + } + f.GetSchemaResponse.Objects.Classes = classes + return class, 0, nil +} + +func (f *fakeSchemaManager) AddClassProperty(ctx context.Context, principal *models.Principal, + class *models.Class, className string, merge bool, newProps ...*models.Property, +) (*models.Class, uint64, error) { + existing := map[string]int{} + var existedClass *models.Class + for _, c := range f.GetSchemaResponse.Objects.Classes { + if c.Class == class.Class { + existedClass = c + for idx, p := range c.Properties { + existing[strings.ToLower(p.Name)] = idx + } + break + } + } + + // update existed + for _, prop := range newProps { + if idx, exists := existing[strings.ToLower(prop.Name)]; exists { + prop.NestedProperties, _ = schema.MergeRecursivelyNestedProperties(existedClass.Properties[idx].NestedProperties, + prop.NestedProperties) + existedClass.Properties[idx] = prop + } else { + existedClass.Properties = append(existedClass.Properties, prop) + } + } + + return class, 0, nil +} + +func (f *fakeSchemaManager) AddTenants(ctx context.Context, + principal *models.Principal, class string, tenants []*models.Tenant, +) (uint64, error) { + f.tenantsEnabled = true + return 0, nil +} + +func (f *fakeSchemaManager) WaitForUpdate(ctx context.Context, schemaVersion uint64) error { + return nil +} + +func (f 
*fakeSchemaManager) StorageCandidates() []string { + return []string{} +} + +func (f *fakeSchemaManager) ResolveAlias(alias string) string { + return f.resolveAliasTo +} + +type fakeVectorRepo struct { + mock.Mock +} + +func (f *fakeVectorRepo) Exists(ctx context.Context, class string, id strfmt.UUID, repl *additional.ReplicationProperties, tenant string) (bool, error) { + args := f.Called(class, id) + return args.Bool(0), args.Error(1) +} + +func (f *fakeVectorRepo) Object(ctx context.Context, cls string, id strfmt.UUID, + props search.SelectProperties, additional additional.Properties, + repl *additional.ReplicationProperties, tenant string, +) (*search.Result, error) { + args := f.Called(cls, id, props, additional, tenant) + if args.Get(0) != nil { + return args.Get(0).(*search.Result), args.Error(1) + } + return nil, args.Error(1) +} + +func (f *fakeVectorRepo) ObjectByID(ctx context.Context, id strfmt.UUID, + props search.SelectProperties, additional additional.Properties, + tenant string, +) (*search.Result, error) { + args := f.Called(id, props, additional) + if args.Get(0) != nil { + return args.Get(0).(*search.Result), args.Error(1) + } + return nil, args.Error(1) +} + +func (f *fakeVectorRepo) ObjectSearch(ctx context.Context, offset, limit int, filters *filters.LocalFilter, + sort []filters.Sort, additional additional.Properties, tenant string, +) (search.Results, error) { + args := f.Called(offset, limit, sort, filters, additional) + return args.Get(0).([]search.Result), args.Error(1) +} + +func (f *fakeVectorRepo) Query(ctx context.Context, q *QueryInput) (search.Results, *Error) { + args := f.Called(q) + var customEr *Error + errors.As(args.Error(1), &customEr) + return args.Get(0).([]search.Result), customEr +} + +func (f *fakeVectorRepo) PutObject(ctx context.Context, concept *models.Object, vector []float32, + vectors map[string][]float32, multiVectors map[string][][]float32, repl *additional.ReplicationProperties, schemaVersion uint64, +) error { 
+ args := f.Called(concept, vector) + return args.Error(0) +} + +func (f *fakeVectorRepo) BatchPutObjects(ctx context.Context, batch BatchObjects, + repl *additional.ReplicationProperties, schemaVersion uint64, +) (BatchObjects, error) { + args := f.Called(batch) + return batch, args.Error(0) +} + +func (f *fakeVectorRepo) AddBatchReferences(ctx context.Context, batch BatchReferences, + repl *additional.ReplicationProperties, schemaVersion uint64, +) (BatchReferences, error) { + args := f.Called(batch) + return batch, args.Error(0) +} + +func (f *fakeVectorRepo) BatchDeleteObjects(ctx context.Context, params BatchDeleteParams, + deletionTime time.Time, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) (BatchDeleteResult, error) { + args := f.Called(params) + return args.Get(0).(BatchDeleteResult), args.Error(1) +} + +func (f *fakeVectorRepo) Merge(ctx context.Context, merge MergeDocument, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64) error { + args := f.Called(merge) + return args.Error(0) +} + +func (f *fakeVectorRepo) DeleteObject(ctx context.Context, className string, + id strfmt.UUID, deletionTime time.Time, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + args := f.Called(className, id, deletionTime) + return args.Error(0) +} + +func (f *fakeVectorRepo) AddReference(ctx context.Context, source *crossref.RefSource, + target *crossref.Ref, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64, +) error { + args := f.Called(source, target) + return args.Error(0) +} + +func (f *fakeVectorRepo) ReferenceVectorSearch(ctx context.Context, + obj *models.Object, refProps map[string]struct{}, +) ([][]float32, error) { + return nil, nil +} + +type fakeExtender struct { + multi []search.Result +} + +func (f *fakeExtender) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams 
map[string]interface{}, cfg moduletools.ClassConfig, +) ([]search.Result, error) { + return f.multi, nil +} + +func (f *fakeExtender) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return nil +} + +func (f *fakeExtender) AdditionalPropertyDefaultValue() interface{} { + return getDefaultParam("nearestNeighbors") +} + +type fakeProjector struct { + multi []search.Result +} + +func (f *fakeProjector) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig, +) ([]search.Result, error) { + return f.multi, nil +} + +func (f *fakeProjector) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return nil +} + +func (f *fakeProjector) AdditionalPropertyDefaultValue() interface{} { + return getDefaultParam("featureProjection") +} + +type fakePathBuilder struct { + multi []search.Result +} + +func (f *fakePathBuilder) AdditionalPropertyFn(ctx context.Context, + in []search.Result, params interface{}, limit *int, + argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig, +) ([]search.Result, error) { + return f.multi, nil +} + +func (f *fakePathBuilder) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} { + return nil +} + +func (f *fakePathBuilder) AdditionalPropertyDefaultValue() interface{} { + return getDefaultParam("semanticPath") +} + +type fakeModulesProvider struct { + mock.Mock + customExtender *fakeExtender + customProjector *fakeProjector +} + +func (p *fakeModulesProvider) GetObjectAdditionalExtend(ctx context.Context, + in *search.Result, moduleParams map[string]interface{}, +) (*search.Result, error) { + res, err := p.additionalExtend(ctx, search.Results{*in}, moduleParams, "ObjectGet") + if err != nil { + return nil, err + } + return &res[0], nil +} + +func (p *fakeModulesProvider) ListObjectsAdditionalExtend(ctx context.Context, + in 
search.Results, moduleParams map[string]interface{}, +) (search.Results, error) { + return p.additionalExtend(ctx, in, moduleParams, "ObjectList") +} + +func (p *fakeModulesProvider) UsingRef2Vec(moduleName string) bool { + args := p.Called(moduleName) + return args.Bool(0) +} + +func (p *fakeModulesProvider) UpdateVector(ctx context.Context, object *models.Object, class *models.Class, + findObjFn modulecapabilities.FindObjectFn, logger logrus.FieldLogger, +) error { + args := p.Called(object, findObjFn) + switch vec := args.Get(0).(type) { + case models.C11yVector: + object.Vector = vec + return args.Error(1) + case []float32: + object.Vector = vec + return args.Error(1) + default: + return args.Error(1) + } +} + +func (p *fakeModulesProvider) BatchUpdateVector(ctx context.Context, class *models.Class, objects []*models.Object, + findObjectFn modulecapabilities.FindObjectFn, + logger logrus.FieldLogger, +) (map[int]error, error) { + args := p.Called() + + for _, obj := range objects { + switch vec := args.Get(0).(type) { + case models.C11yVector: + obj.Vector = vec + case []float32: + obj.Vector = vec + default: + } + } + + return nil, nil +} + +func (p *fakeModulesProvider) VectorizerName(className string) (string, error) { + args := p.Called(className) + return args.String(0), args.Error(1) +} + +func (p *fakeModulesProvider) additionalExtend(ctx context.Context, + in search.Results, moduleParams map[string]interface{}, capability string, +) (search.Results, error) { + txt2vec := newNearCustomTextModule(p.getExtender(), p.getProjector(), &fakePathBuilder{}) + additionalProperties := txt2vec.AdditionalProperties() + if err := p.checkCapabilities(additionalProperties, moduleParams, capability); err != nil { + return nil, err + } + for name, value := range moduleParams { + additionalPropertyFn := p.getAdditionalPropertyFn(additionalProperties[name], capability) + if additionalPropertyFn != nil && value != nil { + resArray, err := additionalPropertyFn(ctx, in, nil, 
nil, nil, nil) + if err != nil { + return nil, err + } + in = resArray + } + } + return in, nil +} + +func (p *fakeModulesProvider) checkCapabilities(additionalProperties map[string]modulecapabilities.AdditionalProperty, + moduleParams map[string]interface{}, capability string, +) error { + for name := range moduleParams { + additionalPropertyFn := p.getAdditionalPropertyFn(additionalProperties[name], capability) + if additionalPropertyFn == nil { + return errors.Errorf("unknown capability: %s", name) + } + } + return nil +} + +func (p *fakeModulesProvider) getAdditionalPropertyFn(additionalProperty modulecapabilities.AdditionalProperty, + capability string, +) modulecapabilities.AdditionalPropertyFn { + switch capability { + case "ObjectGet": + return additionalProperty.SearchFunctions.ObjectGet + case "ObjectList": + return additionalProperty.SearchFunctions.ObjectList + case "ExploreGet": + return additionalProperty.SearchFunctions.ExploreGet + case "ExploreList": + return additionalProperty.SearchFunctions.ExploreList + default: + return nil + } +} + +func (p *fakeModulesProvider) getExtender() *fakeExtender { + if p.customExtender != nil { + return p.customExtender + } + return &fakeExtender{} +} + +func (p *fakeModulesProvider) getProjector() *fakeProjector { + if p.customProjector != nil { + return p.customProjector + } + return &fakeProjector{} +} + +type nearCustomTextParams struct { + Values []string + MoveTo nearExploreMove + Certainty float64 +} + +type nearExploreMove struct { + Values []string + Force float32 + Objects []nearObjectMove +} + +type nearObjectMove struct { + ID string + Beacon string +} + +type nearCustomTextModule struct { + fakeExtender *fakeExtender + fakeProjector *fakeProjector + fakePathBuilder *fakePathBuilder +} + +func newNearCustomTextModule( + fakeExtender *fakeExtender, + fakeProjector *fakeProjector, + fakePathBuilder *fakePathBuilder, +) *nearCustomTextModule { + return &nearCustomTextModule{fakeExtender, fakeProjector, 
fakePathBuilder} +} + +func (m *nearCustomTextModule) Name() string { + return "mock-custom-near-text-module" +} + +func (m *nearCustomTextModule) Init(params moduletools.ModuleInitParams) error { + return nil +} + +func (m *nearCustomTextModule) getNearCustomTextArgument(classname string) *graphql.ArgumentConfig { + prefix := classname + return &graphql.ArgumentConfig{ + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sNearCustomTextInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.NewList(graphql.String)), + }, + "moveTo": &graphql.InputObjectFieldConfig{ + Description: descriptions.VectorMovement, + Type: graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMoveTo", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "concepts": &graphql.InputObjectFieldConfig{ + Description: descriptions.Keywords, + Type: graphql.NewList(graphql.String), + }, + "objects": &graphql.InputObjectFieldConfig{ + Description: "objects", + Type: graphql.NewList(graphql.NewInputObject( + graphql.InputObjectConfig{ + Name: fmt.Sprintf("%sMovementObjectsInpObj", prefix), + Fields: graphql.InputObjectConfigFieldMap{ + "id": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: "id of an object", + }, + "beacon": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + Description: descriptions.Beacon, + }, + }, + Description: "Movement Object", + }, + )), + }, + "force": &graphql.InputObjectFieldConfig{ + Description: descriptions.Force, + Type: graphql.NewNonNull(graphql.Float), + }, + }, + }), + }, + "certainty": &graphql.InputObjectFieldConfig{ + Description: descriptions.Certainty, + Type: graphql.Float, + }, + }, + Description: descriptions.GetWhereInpObj, + }, + ), + } +} + +func (m *nearCustomTextModule) extractNearCustomTextArgument(source map[string]interface{}) *nearCustomTextParams { + var args 
nearCustomTextParams + + concepts := source["concepts"].([]interface{}) + args.Values = make([]string, len(concepts)) + for i, value := range concepts { + args.Values[i] = value.(string) + } + + certainty, ok := source["certainty"] + if ok { + args.Certainty = certainty.(float64) + } + + // moveTo is an optional arg, so it could be nil + moveTo, ok := source["moveTo"] + if ok { + moveToMap := moveTo.(map[string]interface{}) + res := nearExploreMove{} + res.Force = float32(moveToMap["force"].(float64)) + + concepts, ok := moveToMap["concepts"].([]interface{}) + if ok { + res.Values = make([]string, len(concepts)) + for i, value := range concepts { + res.Values[i] = value.(string) + } + } + + objects, ok := moveToMap["objects"].([]interface{}) + if ok { + res.Objects = make([]nearObjectMove, len(objects)) + for i, value := range objects { + v, ok := value.(map[string]interface{}) + if ok { + if v["id"] != nil { + res.Objects[i].ID = v["id"].(string) + } + if v["beacon"] != nil { + res.Objects[i].Beacon = v["beacon"].(string) + } + } + } + } + + args.MoveTo = res + } + + return &args +} + +func (m *nearCustomTextModule) Arguments() map[string]modulecapabilities.GraphQLArgument { + arguments := map[string]modulecapabilities.GraphQLArgument{} + // define nearCustomText argument + arguments["nearCustomText"] = modulecapabilities.GraphQLArgument{ + GetArgumentsFunction: func(classname string) *graphql.ArgumentConfig { + return m.getNearCustomTextArgument(classname) + }, + ExtractFunction: func(source map[string]interface{}) (interface{}, *dto.TargetCombination, error) { + params := m.extractNearCustomTextArgument(source) + return params, nil, nil + }, + ValidateFunction: func(param interface{}) error { + // all is valid + return nil + }, + } + return arguments +} + +// additional properties +func (m *nearCustomTextModule) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty { + additionalProperties := map[string]modulecapabilities.AdditionalProperty{} + 
additionalProperties["featureProjection"] = m.getFeatureProjection() + additionalProperties["nearestNeighbors"] = m.getNearestNeighbors() + additionalProperties["semanticPath"] = m.getSemanticPath() + return additionalProperties +} + +func (m *nearCustomTextModule) getFeatureProjection() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakeProjector.AdditionalPropertyDefaultValue(), + GraphQLNames: []string{"featureProjection"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Args: graphql.FieldConfigArgument{ + "algorithm": &graphql.ArgumentConfig{ + Type: graphql.String, + DefaultValue: nil, + }, + "dimensions": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "learningRate": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "iterations": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + "perplexity": &graphql.ArgumentConfig{ + Type: graphql.Int, + DefaultValue: nil, + }, + }, + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalFeatureProjection", classname), + Fields: graphql.Fields{ + "vector": &graphql.Field{Type: graphql.NewList(graphql.Float)}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakeProjector.ExtractAdditionalFn, + SearchFunctions: modulecapabilities.AdditionalSearch{ + ObjectList: m.fakeProjector.AdditionalPropertyFn, + ExploreGet: m.fakeProjector.AdditionalPropertyFn, + ExploreList: m.fakeProjector.AdditionalPropertyFn, + }, + } +} + +func (m *nearCustomTextModule) getNearestNeighbors() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakeExtender.AdditionalPropertyDefaultValue(), + GraphQLNames: []string{"nearestNeighbors"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: 
fmt.Sprintf("%sAdditionalNearestNeighbors", classname), + Fields: graphql.Fields{ + "neighbors": &graphql.Field{Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalNearestNeighborsNeighbors", classname), + Fields: graphql.Fields{ + "concept": &graphql.Field{Type: graphql.String}, + "distance": &graphql.Field{Type: graphql.Float}, + }, + }))}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakeExtender.ExtractAdditionalFn, + SearchFunctions: modulecapabilities.AdditionalSearch{ + ObjectGet: m.fakeExtender.AdditionalPropertyFn, + ObjectList: m.fakeExtender.AdditionalPropertyFn, + ExploreGet: m.fakeExtender.AdditionalPropertyFn, + ExploreList: m.fakeExtender.AdditionalPropertyFn, + }, + } +} + +func (m *nearCustomTextModule) getSemanticPath() modulecapabilities.AdditionalProperty { + return modulecapabilities.AdditionalProperty{ + DefaultValue: m.fakePathBuilder.AdditionalPropertyDefaultValue(), + GraphQLNames: []string{"semanticPath"}, + GraphQLFieldFunction: func(classname string) *graphql.Field { + return &graphql.Field{ + Type: graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalSemanticPath", classname), + Fields: graphql.Fields{ + "path": &graphql.Field{Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{ + Name: fmt.Sprintf("%sAdditionalSemanticPathElement", classname), + Fields: graphql.Fields{ + "concept": &graphql.Field{Type: graphql.String}, + "distanceToQuery": &graphql.Field{Type: graphql.Float}, + "distanceToResult": &graphql.Field{Type: graphql.Float}, + "distanceToNext": &graphql.Field{Type: graphql.Float}, + "distanceToPrevious": &graphql.Field{Type: graphql.Float}, + }, + }))}, + }, + }), + } + }, + GraphQLExtractFunction: m.fakePathBuilder.ExtractAdditionalFn, + SearchFunctions: modulecapabilities.AdditionalSearch{ + ExploreGet: m.fakePathBuilder.AdditionalPropertyFn, + }, + } +} + +type fakeParams struct{} + +func getDefaultParam(name string) interface{} { + switch name { + 
case "featureProjection", "semanticPath": + return &fakeParams{} + case "nearestNeighbors": + return true + default: + return nil + } +} + +func getFakeModulesProvider(opts ...func(p *fakeModulesProvider)) *fakeModulesProvider { + p := &fakeModulesProvider{} + p.applyOptions(opts...) + return p +} + +func (p *fakeModulesProvider) applyOptions(opts ...func(provider *fakeModulesProvider)) { + for _, opt := range opts { + opt(p) + } +} + +func getFakeModulesProviderWithCustomExtenders( + customExtender *fakeExtender, + customProjector *fakeProjector, + opts ...func(provider *fakeModulesProvider), +) *fakeModulesProvider { + p := &fakeModulesProvider{mock.Mock{}, customExtender, customProjector} + p.applyOptions(opts...) + return p +} + +type fakeMetrics struct { + // Note: only those metric functions that relate to usage-related metrics are + // covered by this mock, others are empty shells + mock.Mock +} + +func (f *fakeMetrics) BatchInc() { +} + +func (f *fakeMetrics) BatchDec() { +} + +func (f *fakeMetrics) BatchRefInc() { +} + +func (f *fakeMetrics) BatchRefDec() { +} + +func (f *fakeMetrics) BatchDeleteInc() { +} + +func (f *fakeMetrics) BatchDeleteDec() { +} + +func (f *fakeMetrics) AddObjectInc() { +} + +func (f *fakeMetrics) AddObjectDec() { +} + +func (f *fakeMetrics) UpdateObjectInc() { +} + +func (f *fakeMetrics) UpdateObjectDec() { +} + +func (f *fakeMetrics) MergeObjectInc() { +} + +func (f *fakeMetrics) MergeObjectDec() { +} + +func (f *fakeMetrics) DeleteObjectInc() { +} + +func (f *fakeMetrics) DeleteObjectDec() { +} + +func (f *fakeMetrics) GetObjectInc() { +} + +func (f *fakeMetrics) GetObjectDec() { +} + +func (f *fakeMetrics) HeadObjectInc() { +} + +func (f *fakeMetrics) HeadObjectDec() { +} + +func (f *fakeMetrics) AddReferenceInc() { +} + +func (f *fakeMetrics) AddReferenceDec() { +} + +func (f *fakeMetrics) UpdateReferenceInc() { +} + +func (f *fakeMetrics) UpdateReferenceDec() { +} + +func (f *fakeMetrics) DeleteReferenceInc() { +} + +func (f 
*fakeMetrics) DeleteReferenceDec() { +} + +func (f *fakeMetrics) AddUsageDimensions(className, queryType, op string, dims int) { + f.Mock.MethodCalled("AddUsageDimensions", className, queryType, op, dims) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/get.go b/platform/dbops/binaries/weaviate-src/usecases/objects/get.go new file mode 100644 index 0000000000000000000000000000000000000000..ded86cbcb22c5a1eaca5476228722228879e04b6 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/get.go @@ -0,0 +1,274 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzerrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/auth/authorization/filter" +) + +// GetObject Class from the connected DB +func (m *Manager) GetObject(ctx context.Context, principal *models.Principal, + class string, id strfmt.UUID, additional additional.Properties, + replProps *additional.ReplicationProperties, tenant string, +) (*models.Object, error) { + class = schema.UppercaseClassName(class) + class, _ = m.resolveAlias(class) + + err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Objects(class, tenant, id)) + if err != nil { + return nil, err + } + + m.metrics.GetObjectInc() + defer m.metrics.GetObjectDec() + + res, err := 
m.getObjectFromRepo(ctx, class, id, additional, replProps, tenant) + if err != nil { + return nil, err + } + + if additional.Vector { + m.trackUsageSingle(res) + } + + obj := res.ObjectWithVector(additional.Vector) + return obj, nil +} + +// GetObjects Class from the connected DB +func (m *Manager) GetObjects(ctx context.Context, principal *models.Principal, + offset *int64, limit *int64, sort *string, order *string, after *string, + addl additional.Properties, tenant string, +) ([]*models.Object, error) { + err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Objects("", tenant, "")) + if err != nil { + return nil, err + } + + m.metrics.GetObjectInc() + defer m.metrics.GetObjectDec() + + objects, err := m.getObjectsFromRepo(ctx, offset, limit, sort, order, after, addl, tenant) + if err != nil { + return nil, err + } + + // Filter objects based on authorization + resourceFilter := filter.New[*models.Object](m.authorizer, m.config.Config.Authorization.Rbac) + filteredObjects := resourceFilter.Filter( + ctx, + m.logger, + principal, + objects, + authorization.READ, + func(obj *models.Object) string { + return authorization.Objects(obj.Class, tenant, obj.ID) + }, + ) + + return filteredObjects, nil +} + +func (m *Manager) GetObjectsClass(ctx context.Context, principal *models.Principal, + id strfmt.UUID, +) (*models.Class, error) { + err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Objects("", "", id)) + if err != nil { + return nil, err + } + + m.metrics.GetObjectInc() + defer m.metrics.GetObjectDec() + + res, err := m.getObjectFromRepo(ctx, "", id, additional.Properties{}, nil, "") + if err != nil { + return nil, err + } + + class, err := m.schemaManager.GetClass(ctx, principal, res.ClassName) + return class, err +} + +func (m *Manager) GetObjectClassFromName(ctx context.Context, principal *models.Principal, + className string, +) (*models.Class, error) { + class, err := m.schemaManager.GetClass(ctx, 
principal, className) + return class, err +} + +func (m *Manager) getObjectFromRepo(ctx context.Context, class string, id strfmt.UUID, + adds additional.Properties, repl *additional.ReplicationProperties, tenant string, +) (res *search.Result, err error) { + if class != "" { + if cls := m.schemaManager.ResolveAlias(class); cls != "" { + class = cls + } + res, err = m.vectorRepo.Object(ctx, class, id, search.SelectProperties{}, adds, repl, tenant) + } else { + res, err = m.vectorRepo.ObjectByID(ctx, id, search.SelectProperties{}, adds, tenant) + } + if err != nil { + switch { + case errors.As(err, &ErrMultiTenancy{}): + return nil, NewErrMultiTenancy(fmt.Errorf("repo: object by id: %w", err)) + default: + if errors.As(err, &authzerrs.Forbidden{}) { + return nil, fmt.Errorf("repo: object by id: %w", err) + } + return nil, NewErrInternal("repo: object by id: %v", err) + } + } + + if res == nil { + return nil, NewErrNotFound("no object with id '%s'", id) + } + + if m.modulesProvider != nil { + res, err = m.modulesProvider.GetObjectAdditionalExtend(ctx, res, adds.ModuleParams) + if err != nil { + return nil, fmt.Errorf("get extend: %w", err) + } + } + + return res, nil +} + +func (m *Manager) getObjectsFromRepo(ctx context.Context, + offset, limit *int64, sort, order *string, after *string, + additional additional.Properties, tenant string, +) ([]*models.Object, error) { + smartOffset, smartLimit, err := m.localOffsetLimit(offset, limit) + if err != nil { + return nil, NewErrInternal("list objects: %v", err) + } + if after != nil { + return nil, NewErrInternal("list objects: after parameter not allowed, cursor must be specific to one class, set class query param") + } + res, err := m.vectorRepo.ObjectSearch(ctx, smartOffset, smartLimit, + nil, m.getSort(sort, order), additional, tenant) + if err != nil { + return nil, NewErrInternal("list objects: %v", err) + } + + if m.modulesProvider != nil { + res, err = m.modulesProvider.ListObjectsAdditionalExtend(ctx, res, 
additional.ModuleParams) + if err != nil { + return nil, NewErrInternal("list extend: %v", err) + } + } + + if additional.Vector { + m.trackUsageList(res) + } + + return res.ObjectsWithVector(additional.Vector), nil +} + +func (m *Manager) getSort(sort, order *string) []filters.Sort { + if sort != nil { + sortParams := strings.Split(*sort, ",") + var orderParams []string + if order != nil { + orderParams = strings.Split(*order, ",") + } + var res []filters.Sort + for i := range sortParams { + res = append(res, filters.Sort{ + Path: []string{sortParams[i]}, + Order: m.getOrder(orderParams, i), + }) + } + return res + } + return nil +} + +func (m *Manager) getOrder(order []string, i int) string { + if len(order) > i { + switch order[i] { + case "asc", "desc": + return order[i] + default: + return "asc" + } + } + return "asc" +} + +func (m *Manager) localOffsetOrZero(paramOffset *int64) int { + offset := int64(0) + if paramOffset != nil { + offset = *paramOffset + } + + return int(offset) +} + +func (m *Manager) localLimitOrGlobalLimit(offset int64, paramMaxResults *int64) int { + limit := int64(m.config.Config.QueryDefaults.Limit) + // Get the max results from params, if exists + if paramMaxResults != nil { + limit = *paramMaxResults + } + + return int(limit) +} + +func (m *Manager) localOffsetLimit(paramOffset *int64, paramLimit *int64) (int, int, error) { + offset := m.localOffsetOrZero(paramOffset) + limit := m.localLimitOrGlobalLimit(int64(offset), paramLimit) + + if int64(offset+limit) > m.config.Config.QueryMaximumResults { + return 0, 0, errors.New("query maximum results exceeded") + } + + return offset, limit, nil +} + +func (m *Manager) trackUsageSingle(res *search.Result) { + if res == nil { + return + } + m.metrics.AddUsageDimensions(res.ClassName, "get_rest", "single_include_vector", res.Dims) +} + +func (m *Manager) trackUsageList(res search.Results) { + if len(res) == 0 { + return + } + m.metrics.AddUsageDimensions(res[0].ClassName, "get_rest", 
"list_include_vector", res[0].Dims) +} + +func (m *Manager) getCursor(after *string, limit *int64) *filters.Cursor { + if after != nil { + if limit == nil { + // limit -1 means that no limit param was set + return &filters.Cursor{After: *after, Limit: -1} + } + return &filters.Cursor{After: *after, Limit: int(*limit)} + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/get_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/get_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ce79b95447a314e0706d3807677a84cd4af8d68f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/get_test.go @@ -0,0 +1,1099 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" +) + +func Test_GetAction(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + manager *Manager + extender *fakeExtender + projectorFake *fakeProjector + metrics *fakeMetrics + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ActionClass", + }, + }, + }, + } + + reset := func() { 
+ vectorRepo = &fakeVectorRepo{} + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + cfg.Config.QueryDefaults.Limit = 20 + cfg.Config.QueryMaximumResults = 200 + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + extender = &fakeExtender{} + projectorFake = &fakeProjector{} + metrics = &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, + authorizer, vectorRepo, + getFakeModulesProviderWithCustomExtenders(extender, projectorFake), metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + t.Run("get non-existing action by id", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return((*search.Result)(nil), nil).Once() + + _, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{}, nil, "") + assert.Equal(t, NewErrNotFound("no object with id '99ee9968-22ec-416a-9032-cff80f2f7fdf'"), err) + }) + + t.Run("get existing action by id", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + + expected := &models.Object{ + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + } + + res, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{}, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("get existing object by id with vector without classname (deprecated)", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := 
&search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + Vector: []float32{1, 2, 3}, + Dims: 3, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + + expected := &models.Object{ + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Vector: []float32{1, 2, 3}, + } + + metrics.On("AddUsageDimensions", "ActionClass", "get_rest", "single_include_vector", 3) + + res, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{Vector: true}, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("get existing object by id with vector with classname", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + Vector: []float32{1, 2, 3}, + Dims: 3, + } + vectorRepo.On("Object", "ActionClass", id, mock.Anything, mock.Anything, ""). 
+ Return(result, nil).Once() + + expected := &models.Object{ + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Vector: []float32{1, 2, 3}, + } + + metrics.On("AddUsageDimensions", "ActionClass", "get_rest", "single_include_vector", 3) + + res, err := manager.GetObject(context.Background(), &models.Principal{}, + "ActionClass", id, additional.Properties{Vector: true}, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("list all existing actions with all default pagination settings", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + results := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", 0, 20, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(results, nil).Once() + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, nil, nil, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("list all existing objects with vectors", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + results := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + Vector: []float32{1, 2, 3}, + Dims: 3, + }, + } + vectorRepo.On("ObjectSearch", 0, 20, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(results, nil).Once() + + metrics.On("AddUsageDimensions", "ActionClass", "get_rest", "list_include_vector", 3) + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: 
(map[string]string)(nil), + Vector: []float32{1, 2, 3}, + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, nil, nil, nil, nil, additional.Properties{Vector: true}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("list all existing actions with all explicit offset and limit", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + results := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", 7, 2, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(results, nil).Once() + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, ptInt64(7), ptInt64(2), nil, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("with an offset greater than the maximum", func(t *testing.T) { + reset() + + _, err := manager.GetObjects(context.Background(), &models.Principal{}, ptInt64(201), ptInt64(2), nil, nil, nil, additional.Properties{}, "") + require.NotNil(t, err) + assert.Contains(t, err.Error(), "query maximum results exceeded") + }) + + t.Run("with a limit greater than the minimum", func(t *testing.T) { + reset() + + _, err := manager.GetObjects(context.Background(), &models.Principal{}, ptInt64(0), ptInt64(202), nil, nil, nil, additional.Properties{}, "") + require.NotNil(t, err) + assert.Contains(t, err.Error(), "query maximum results exceeded") + }) + + t.Run("with limit and offset individually smaller, but combined greater", func(t *testing.T) { + reset() + + _, err := manager.GetObjects(context.Background(), &models.Principal{}, ptInt64(150), ptInt64(150), nil, nil, nil, additional.Properties{}, "") + 
require.NotNil(t, err) + assert.Contains(t, err.Error(), "query maximum results exceeded") + }) + + t.Run("additional props", func(t *testing.T) { + t.Run("on get single requests", func(t *testing.T) { + t.Run("feature projection", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + _, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": getDefaultParam("featureProjection"), + }, + }, nil, "") + assert.Equal(t, errors.New("get extend: unknown capability: featureProjection").Error(), err.Error()) + }) + + t.Run("semantic path", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + _, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{ + ModuleParams: map[string]interface{}{ + "semanticPath": getDefaultParam("semanticPath"), + }, + }, nil, "") + assert.Equal(t, errors.New("get extend: unknown capability: semanticPath").Error(), err.Error()) + }) + + t.Run("nearest neighbors", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + extender.multi = []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + 
AdditionalProperties: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + expected := &models.Object{ + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + } + + res, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + }) + + t.Run("on list requests", func(t *testing.T) { + t.Run("nearest neighbors", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + extender.multi = []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + res, err := manager.GetObjects(context.Background(), 
&models.Principal{}, nil, ptInt64(10), nil, nil, nil, additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("feature projection", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + projectorFake.multi = []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "featureProjection": &FeatureProjection{ + Vector: []float32{1, 2, 3}, + }, + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "featureProjection": &FeatureProjection{ + Vector: []float32{1, 2, 3}, + }, + }, + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), nil, nil, nil, additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": getDefaultParam("featureProjection"), + }, + }, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + }) + }) + + t.Run("sort props", func(t *testing.T) { + t.Run("sort=foo,number&order=asc,desc", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + sort := "foo,number" + asc := "asc,desc" + expectedSort := []filters.Sort{ + {Path: []string{"foo"}, Order: "asc"}, + {Path: []string{"number"}, Order: "desc"}, + } + + result := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": 
"bar", + "number": float64(1), + }, + }, + } + vectorRepo.On("ObjectSearch", mock.AnythingOfType("int"), mock.AnythingOfType("int"), expectedSort, + mock.Anything, mock.Anything, mock.Anything).Return(result, nil).Once() + projectorFake.multi = []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + VectorWeights: (map[string]string)(nil), + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), &sort, &asc, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("sort=foo,number,prop1,prop2&order=desc", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + sort := "foo,number,prop1,prop2" + asc := "desc" + expectedSort := []filters.Sort{ + {Path: []string{"foo"}, Order: "desc"}, + {Path: []string{"number"}, Order: "asc"}, + {Path: []string{"prop1"}, Order: "asc"}, + {Path: []string{"prop2"}, Order: "asc"}, + } + + result := []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, expectedSort, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + projectorFake.multi = []search.Result{ + { + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ActionClass", + Properties: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + VectorWeights: (map[string]string)(nil), + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), &sort, 
&asc, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("sort=foo,number", func(t *testing.T) { + reset() + sort := "foo,number" + expectedSort := []filters.Sort{ + {Path: []string{"foo"}, Order: "asc"}, + {Path: []string{"number"}, Order: "asc"}, + } + result := []search.Result{ + { + ID: "uuid", + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, expectedSort, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + + _, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), &sort, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + }) + + t.Run("sort=foo,number,prop", func(t *testing.T) { + reset() + sort := "foo,number,prop" + expectedSort := []filters.Sort{ + {Path: []string{"foo"}, Order: "asc"}, + {Path: []string{"number"}, Order: "asc"}, + {Path: []string{"prop"}, Order: "asc"}, + } + result := []search.Result{ + { + ID: "uuid", + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, expectedSort, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + + _, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), &sort, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + }) + + t.Run("order=asc", func(t *testing.T) { + reset() + order := "asc" + var expectedSort []filters.Sort + result := []search.Result{ + { + ID: "uuid", + ClassName: "ActionClass", + Schema: map[string]interface{}{ + "foo": "bar", + "number": float64(1), + }, + }, + } + + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, expectedSort, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + + _, err := 
manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), nil, &order, nil, additional.Properties{}, "") + require.Nil(t, err) + }) + }) +} + +func Test_GetThing(t *testing.T) { + var ( + vectorRepo *fakeVectorRepo + manager *Manager + extender *fakeExtender + projectorFake *fakeProjector + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ThingClass", + }, + }, + }, + } + + reset := func() { + vectorRepo = &fakeVectorRepo{} + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + cfg.Config.QueryDefaults.Limit = 20 + cfg.Config.QueryMaximumResults = 200 + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + extender = &fakeExtender{} + projectorFake = &fakeProjector{} + metrics := &fakeMetrics{} + manager = NewManager(schemaManager, cfg, logger, + authorizer, vectorRepo, + getFakeModulesProviderWithCustomExtenders(extender, projectorFake), metrics, nil, + NewAutoSchemaManager(schemaManager, vectorRepo, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + t.Run("get non-existing thing by id", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return((*search.Result)(nil), nil).Once() + + _, err := manager.GetObject(context.Background(), &models.Principal{}, "", id, + additional.Properties{}, nil, "") + assert.Equal(t, NewErrNotFound("no object with id '99ee9968-22ec-416a-9032-cff80f2f7fdf'"), err) + }) + + t.Run("get existing thing by id", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + + expected := &models.Object{ + ID: id, + Class: "ThingClass", + 
Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + } + + res, err := manager.GetObject(context.Background(), &models.Principal{}, "", id, + additional.Properties{}, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("list all existing things", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + results := []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(results, nil).Once() + + expected := []*models.Object{ + { + ID: id, + Class: "ThingClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, nil, nil, nil, nil, additional.Properties{}, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("additional props", func(t *testing.T) { + t.Run("on get single requests", func(t *testing.T) { + t.Run("feature projection", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + _, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": getDefaultParam("featureProjection"), + }, + }, nil, "") + assert.Equal(t, errors.New("get extend: unknown capability: featureProjection").Error(), err.Error()) + }) + + t.Run("nearest neighbors", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := &search.Result{ + ID: id, + 
ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + } + vectorRepo.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + extender.multi = []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + expected := &models.Object{ + ID: id, + Class: "ThingClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + } + + res, err := manager.GetObject(context.Background(), &models.Principal{}, "", + id, additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + }) + + t.Run("on list requests", func(t *testing.T) { + t.Run("nearest neighbors", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + extender.multi = []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ThingClass", + Properties: map[string]interface{}{"foo": "bar"}, + 
VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), nil, nil, nil, additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + + t.Run("feature projection", func(t *testing.T) { + reset() + id := strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") + + result := []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + }, + } + vectorRepo.On("ObjectSearch", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(result, nil).Once() + projectorFake.multi = []search.Result{ + { + ID: id, + ClassName: "ThingClass", + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "featureProjection": &FeatureProjection{ + Vector: []float32{1, 2, 3}, + }, + }, + }, + } + + expected := []*models.Object{ + { + ID: id, + Class: "ThingClass", + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "featureProjection": &FeatureProjection{ + Vector: []float32{1, 2, 3}, + }, + }, + }, + } + + res, err := manager.GetObjects(context.Background(), &models.Principal{}, nil, ptInt64(10), nil, nil, nil, additional.Properties{ + ModuleParams: map[string]interface{}{ + "featureProjection": getDefaultParam("featureProjection"), + }, + }, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) + }) + }) +} + +func Test_GetObject(t *testing.T) { + var ( + principal = models.Principal{} + adds = additional.Properties{} + className = "MyClass" + id = strfmt.UUID("99ee9968-22ec-416a-9032-cff80f2f7fdf") 
+ schema = schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: className, + }, + }, + }, + } + result = &search.Result{ + ID: id, + ClassName: className, + Schema: map[string]interface{}{"foo": "bar"}, + } + ) + + t.Run("without projection", func(t *testing.T) { + m := newFakeGetManager(schema) + m.repo.On("Object", className, id, mock.Anything, mock.Anything, "").Return((*search.Result)(nil), nil).Once() + _, err := m.GetObject(context.Background(), &principal, className, id, adds, nil, "") + if err == nil { + t.Errorf("GetObject() must return an error for non existing object") + } + + m.repo.On("Object", className, id, mock.Anything, mock.Anything, "").Return(result, nil).Once() + expected := &models.Object{ + ID: id, + Class: className, + Properties: map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + } + + got, err := m.GetObject(context.Background(), &principal, className, id, adds, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, got) + }) + + t.Run("with projection", func(t *testing.T) { + m := newFakeGetManager(schema) + m.extender.multi = []search.Result{ + { + ID: id, + ClassName: className, + Schema: map[string]interface{}{"foo": "bar"}, + AdditionalProperties: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + }, + } + m.repo.On("Object", className, id, mock.Anything, mock.Anything, "").Return(result, nil).Once() + _, err := m.GetObject(context.Background(), &principal, className, id, + additional.Properties{ + ModuleParams: map[string]interface{}{ + "Unknown": getDefaultParam("Unknown"), + }, + }, nil, "") + if err == nil { + t.Errorf("GetObject() must return unknown feature projection error") + } + + m.repo.On("Object", className, id, mock.Anything, mock.Anything, "").Return(result, nil).Once() + expected := &models.Object{ + ID: id, + Class: className, + Properties: 
map[string]interface{}{"foo": "bar"}, + VectorWeights: (map[string]string)(nil), + Additional: models.AdditionalProperties{ + "nearestNeighbors": &NearestNeighbors{ + Neighbors: []*NearestNeighbor{ + { + Concept: "foo", + Distance: 0.3, + }, + }, + }, + }, + } + + res, err := m.GetObject(context.Background(), &principal, className, id, + additional.Properties{ + ModuleParams: map[string]interface{}{ + "nearestNeighbors": true, + }, + }, nil, "") + require.Nil(t, err) + assert.Equal(t, expected, res) + }) +} + +func ptInt64(in int64) *int64 { + return &in +} + +type fakeGetManager struct { + *Manager + repo *fakeVectorRepo + extender *fakeExtender + projector *fakeProjector + authorizer *mocks.FakeAuthorizer + metrics *fakeMetrics + modulesProvider *fakeModulesProvider +} + +func newFakeGetManager(schema schema.Schema, opts ...func(*fakeGetManager)) fakeGetManager { + r := fakeGetManager{ + repo: new(fakeVectorRepo), + extender: new(fakeExtender), + projector: new(fakeProjector), + authorizer: mocks.NewMockAuthorizer(), + metrics: new(fakeMetrics), + modulesProvider: new(fakeModulesProvider), + } + + for _, opt := range opts { + opt(&r) + } + + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + + cfg := &config.WeaviateConfig{} + cfg.Config.QueryDefaults.Limit = 20 + cfg.Config.QueryMaximumResults = 200 + cfg.Config.TrackVectorDimensions = true + logger, _ := test.NewNullLogger() + r.modulesProvider = getFakeModulesProviderWithCustomExtenders(r.extender, r.projector) + r.Manager = NewManager(schemaManager, cfg, logger, + r.authorizer, r.repo, r.modulesProvider, r.metrics, nil, + NewAutoSchemaManager(schemaManager, r.repo, cfg, r.authorizer, logger, prometheus.NewPedanticRegistry())) + + return r +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/head.go b/platform/dbops/binaries/weaviate-src/usecases/objects/head.go new file mode 100644 index 0000000000000000000000000000000000000000..b9066fcfc7806f7d08df07342d96f644b42cda75 
--- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/head.go @@ -0,0 +1,54 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" +) + +// HeadObject check object's existence in the connected DB +func (m *Manager) HeadObject(ctx context.Context, principal *models.Principal, className string, + id strfmt.UUID, repl *additional.ReplicationProperties, tenant string, +) (bool, *Error) { + className, _ = m.resolveAlias(className) + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Objects(className, tenant, id)); err != nil { + return false, &Error{err.Error(), StatusForbidden, err} + } + + m.metrics.HeadObjectInc() + defer m.metrics.HeadObjectDec() + + if cls := m.schemaManager.ResolveAlias(className); cls != "" { + className = cls + } + + ok, err := m.vectorRepo.Exists(ctx, className, id, repl, tenant) + if err != nil { + switch { + case errors.As(err, &ErrMultiTenancy{}): + return false, &Error{"repo.exists", StatusUnprocessableEntity, err} + default: + if (errors.As(err, &ErrDirtyReadOfDeletedObject{})) { + return false, nil + } + return false, &Error{"repo.exists", StatusInternalServerError, err} + } + } + return ok, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/head_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/head_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e8bf9334c0b74df8007bbc064a8bdd36b584d64b --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/usecases/objects/head_test.go @@ -0,0 +1,82 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/pkg/errors" + "github.com/weaviate/weaviate/entities/schema" +) + +func Test_HeadObject(t *testing.T) { + t.Parallel() + var ( + cls = "MyClass" + id = strfmt.UUID("5a1cd361-1e0d-42ae-bd52-ee09cb5f31cc") + m = newFakeGetManager(schema.Schema{}) + errAny = errors.New("any") + ) + + tests := []struct { + class string + mockedOk bool + mockedErr error + authErr error + wantOK bool + wantCode int + }{ + { + mockedOk: true, + wantOK: true, + }, + { + class: cls, + mockedOk: true, + wantOK: true, + }, + { + class: cls, + mockedOk: false, + wantOK: false, + }, + { + class: cls, + mockedOk: false, + mockedErr: errAny, + wantOK: false, + wantCode: StatusInternalServerError, + }, + { + class: cls, + authErr: errAny, + wantOK: false, + wantCode: StatusForbidden, + }, + } + for i, tc := range tests { + m.authorizer.SetErr(tc.authErr) + if tc.authErr == nil { + m.repo.On("Exists", tc.class, id).Return(tc.mockedOk, tc.mockedErr).Once() + } + ok, err := m.Manager.HeadObject(context.Background(), nil, tc.class, id, nil, "") + code := 0 + if err != nil { + code = err.Code + } + if tc.wantOK != ok || tc.wantCode != code { + t.Errorf("case %d expected:(%v, %v) got:(%v, %v)", i+1, tc.wantOK, tc.wantCode, ok, code) + } + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/manager.go b/platform/dbops/binaries/weaviate-src/usecases/objects/manager.go new file mode 100644 index 0000000000000000000000000000000000000000..f10fe93637c519836ddb63a321420bd64067f6c9 --- /dev/null +++ 
b/platform/dbops/binaries/weaviate-src/usecases/objects/manager.go @@ -0,0 +1,203 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +// package objects provides managers for all kind-related items, such as objects. +// Manager provides methods for "regular" interaction, such as +// add, get, delete, update, etc. Additionally BatchManager allows for +// efficient batch-adding of object instances and references. +package objects + +import ( + "context" + "fmt" + "time" + + "github.com/go-openapi/strfmt" + "github.com/google/uuid" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/modulecapabilities" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/versioned" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" + "github.com/weaviate/weaviate/usecases/objects/alias" +) + +type schemaManager interface { + AddClass(ctx context.Context, principal *models.Principal, class *models.Class) (*models.Class, uint64, error) + AddTenants(ctx context.Context, principal *models.Principal, class string, tenants []*models.Tenant) (uint64, error) + GetClass(ctx context.Context, principal *models.Principal, name string) (*models.Class, error) + // ReadOnlyClass return class model. + ReadOnlyClass(name string) *models.Class + // AddClassProperty it is upsert operation. 
it adds properties to a class and updates + // existing properties if the merge bool passed true. + AddClassProperty(ctx context.Context, principal *models.Principal, class *models.Class, className string, merge bool, prop ...*models.Property) (*models.Class, uint64, error) + + // Consistent methods with the consistency flag. + // This is used to ensure that internal users will not miss-use the flag and it doesn't need to be set to a default + // value everytime we use the Manager. + + // GetConsistentClass overrides the default implementation to consider the consistency flag + GetConsistentClass(ctx context.Context, principal *models.Principal, + name string, consistency bool, + ) (*models.Class, uint64, error) + + // GetCachedClass extracts class from context. If class was not set it is fetched first + GetCachedClass(ctx context.Context, principal *models.Principal, names ...string, + ) (map[string]versioned.Class, error) + + GetCachedClassNoAuth(ctx context.Context, names ...string) (map[string]versioned.Class, error) + + // WaitForUpdate ensures that the local schema has caught up to schemaVersion + WaitForUpdate(ctx context.Context, schemaVersion uint64) error + + // GetConsistentSchema retrieves a locally cached copy of the schema + GetConsistentSchema(ctx context.Context, principal *models.Principal, consistency bool) (schema.Schema, error) + + // ResolveAlias returns a class name associated with a given alias, empty string if doesn't exist + ResolveAlias(alias string) string +} + +// Manager manages kind changes at a use-case level, i.e. 
agnostic of +// underlying databases or storage providers +type Manager struct { + config *config.WeaviateConfig + schemaManager schemaManager + logger logrus.FieldLogger + authorizer authorization.Authorizer + vectorRepo VectorRepo + timeSource timeSource + modulesProvider ModulesProvider + autoSchemaManager *AutoSchemaManager + metrics objectsMetrics + allocChecker *memwatch.Monitor +} + +type objectsMetrics interface { + BatchInc() + BatchDec() + BatchRefInc() + BatchRefDec() + BatchDeleteInc() + BatchDeleteDec() + AddObjectInc() + AddObjectDec() + UpdateObjectInc() + UpdateObjectDec() + MergeObjectInc() + MergeObjectDec() + DeleteObjectInc() + DeleteObjectDec() + GetObjectInc() + GetObjectDec() + HeadObjectInc() + HeadObjectDec() + AddReferenceInc() + AddReferenceDec() + UpdateReferenceInc() + UpdateReferenceDec() + DeleteReferenceInc() + DeleteReferenceDec() + AddUsageDimensions(className, queryType, operation string, dims int) +} + +type timeSource interface { + Now() int64 +} + +type VectorRepo interface { + PutObject(ctx context.Context, concept *models.Object, vector []float32, + vectors map[string][]float32, multiVectors map[string][][]float32, + repl *additional.ReplicationProperties, schemaVersion uint64) error + DeleteObject(ctx context.Context, className string, id strfmt.UUID, deletionTime time.Time, + repl *additional.ReplicationProperties, tenant string, schemaVersion uint64) error + // Object returns object of the specified class giving by its id + Object(ctx context.Context, class string, id strfmt.UUID, props search.SelectProperties, + additional additional.Properties, repl *additional.ReplicationProperties, + tenant string) (*search.Result, error) + // Exists returns true if an object of a giving class exists + Exists(ctx context.Context, class string, id strfmt.UUID, + repl *additional.ReplicationProperties, tenant string) (bool, error) + ObjectByID(ctx context.Context, id strfmt.UUID, props search.SelectProperties, + additional 
additional.Properties, tenant string) (*search.Result, error) + ObjectSearch(ctx context.Context, offset, limit int, filters *filters.LocalFilter, + sort []filters.Sort, additional additional.Properties, tenant string) (search.Results, error) + AddReference(ctx context.Context, source *crossref.RefSource, + target *crossref.Ref, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64) error + Merge(ctx context.Context, merge MergeDocument, repl *additional.ReplicationProperties, tenant string, schemaVersion uint64) error + Query(context.Context, *QueryInput) (search.Results, *Error) +} + +type ModulesProvider interface { + GetObjectAdditionalExtend(ctx context.Context, in *search.Result, + moduleParams map[string]interface{}) (*search.Result, error) + ListObjectsAdditionalExtend(ctx context.Context, in search.Results, + moduleParams map[string]interface{}) (search.Results, error) + UsingRef2Vec(className string) bool + UpdateVector(ctx context.Context, object *models.Object, class *models.Class, repo modulecapabilities.FindObjectFn, + logger logrus.FieldLogger) error + BatchUpdateVector(ctx context.Context, class *models.Class, objects []*models.Object, + findObjectFn modulecapabilities.FindObjectFn, + logger logrus.FieldLogger) (map[int]error, error) + VectorizerName(className string) (string, error) +} + +// NewManager creates a new manager +func NewManager(schemaManager schemaManager, + config *config.WeaviateConfig, logger logrus.FieldLogger, + authorizer authorization.Authorizer, vectorRepo VectorRepo, + modulesProvider ModulesProvider, metrics objectsMetrics, allocChecker *memwatch.Monitor, + autoSchemaManager *AutoSchemaManager, +) *Manager { + if allocChecker == nil { + allocChecker = memwatch.NewDummyMonitor() + } + + return &Manager{ + config: config, + schemaManager: schemaManager, + logger: logger, + authorizer: authorizer, + vectorRepo: vectorRepo, + timeSource: defaultTimeSource{}, + modulesProvider: modulesProvider, + 
autoSchemaManager: autoSchemaManager, + metrics: metrics, + allocChecker: allocChecker, + } +} + +// Alias +func (m *Manager) resolveAlias(class string) (className, aliasName string) { + return alias.ResolveAlias(m.schemaManager, class) +} + +func generateUUID() (strfmt.UUID, error) { + id, err := uuid.NewRandom() + if err != nil { + return "", fmt.Errorf("could not generate uuid v4: %w", err) + } + + return strfmt.UUID(id.String()), nil +} + +type defaultTimeSource struct{} + +func (ts defaultTimeSource) Now() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/merge.go b/platform/dbops/binaries/weaviate-src/usecases/objects/merge.go new file mode 100644 index 0000000000000000000000000000000000000000..c9d490ec705c053719926d7ccd16071da9d8ae94 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/merge.go @@ -0,0 +1,285 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/weaviate/weaviate/entities/classcache" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/usecases/auth/authorization" + authzerrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + "github.com/weaviate/weaviate/usecases/config" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +type MergeDocument struct { + Class string `json:"class"` + ID strfmt.UUID `json:"id"` + PrimitiveSchema map[string]interface{} `json:"primitiveSchema"` + References BatchReferences `json:"references"` + Vector []float32 `json:"vector"` + Vectors models.Vectors `json:"vectors"` + UpdateTime int64 `json:"updateTime"` + AdditionalProperties models.AdditionalProperties `json:"additionalProperties"` + PropertiesToDelete []string `json:"propertiesToDelete"` +} + +func (m *Manager) MergeObject(ctx context.Context, principal *models.Principal, + updates *models.Object, repl *additional.ReplicationProperties, +) *Error { + if err := m.validateInputs(updates); err != nil { + return &Error{"bad request", StatusBadRequest, err} + } + className, aliasName := m.resolveAlias(schema.UppercaseClassName(updates.Class)) + updates.Class = className + cls, id := updates.Class, updates.ID + if err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Objects(cls, updates.Tenant, id)); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + + ctx = classcache.ContextWithClassCache(ctx) + + // we don't reveal any info that the end users cannot get through the structure of the data anyway + fetchedClass, err := m.schemaManager.GetCachedClassNoAuth(ctx, className) 
+ if err != nil { + if errors.As(err, &authzerrs.Forbidden{}) { + return &Error{err.Error(), StatusForbidden, err} + } + + return &Error{err.Error(), StatusBadRequest, NewErrInvalidUserInput("invalid object: %v", err)} + } + + m.metrics.MergeObjectInc() + defer m.metrics.MergeObjectDec() + + if err := m.allocChecker.CheckAlloc(memwatch.EstimateObjectMemory(updates)); err != nil { + m.logger.WithError(err).Errorf("memory pressure: cannot process patch object") + return &Error{err.Error(), StatusInternalServerError, err} + } + + obj, err := m.vectorRepo.Object(ctx, cls, id, nil, additional.Properties{}, repl, updates.Tenant) + if err != nil { + switch { + case errors.As(err, &ErrMultiTenancy{}): + return &Error{"repo.object", StatusUnprocessableEntity, err} + default: + if errors.As(err, &ErrDirtyReadOfDeletedObject{}) || errors.As(err, &ErrDirtyWriteOfDeletedObject{}) { + m.logger.WithError(err).Debugf("object %s/%s not found, possibly due to replication consistency races", cls, id) + return &Error{"not found", StatusNotFound, err} + } + if errors.As(err, &authzerrs.Forbidden{}) { + return &Error{"forbidden", StatusForbidden, err} + } + return &Error{"repo.object", StatusInternalServerError, err} + } + } + if obj == nil { + return &Error{"not found", StatusNotFound, err} + } + + maxSchemaVersion, err := m.autoSchemaManager.autoSchema(ctx, principal, false, fetchedClass, updates) + if err != nil { + return &Error{"bad request", StatusBadRequest, NewErrInvalidUserInput("invalid object: %v", err)} + } + + var propertiesToDelete []string + if updates.Properties != nil { + for key, val := range updates.Properties.(map[string]interface{}) { + if val == nil { + propertiesToDelete = append(propertiesToDelete, schema.LowercaseFirstLetter(key)) + } + } + } + + prevObj := obj.Object() + if err := m.validateObjectAndNormalizeNames(ctx, repl, updates, prevObj, fetchedClass); err != nil { + return &Error{"bad request", StatusBadRequest, err} + } + + if updates.Properties == nil { 
+ updates.Properties = map[string]interface{}{} + } + + pathErr := m.patchObject(ctx, prevObj, updates, repl, propertiesToDelete, updates.Tenant, fetchedClass, maxSchemaVersion) + if aliasName != "" { + updates.Class = aliasName + } + return pathErr +} + +// patchObject patches an existing object obj with updates +func (m *Manager) patchObject(ctx context.Context, prevObj, updates *models.Object, repl *additional.ReplicationProperties, + propertiesToDelete []string, tenant string, fetchedClass map[string]versioned.Class, maxSchemaVersion uint64, +) *Error { + cls, id := updates.Class, updates.ID + class := fetchedClass[cls].Class + primitive, refs := m.splitPrimitiveAndRefs(updates.Properties.(map[string]interface{}), cls, id) + objWithVec, err := m.mergeObjectSchemaAndVectorize(ctx, prevObj.Properties, + primitive, prevObj.Vector, updates.Vector, prevObj.Vectors, updates.Vectors, updates.ID, class) + if err != nil { + return &Error{"merge and vectorize", StatusInternalServerError, err} + } + mergeDoc := MergeDocument{ + Class: cls, + ID: id, + PrimitiveSchema: primitive, + References: refs, + Vector: objWithVec.Vector, + Vectors: objWithVec.Vectors, + UpdateTime: m.timeSource.Now(), + PropertiesToDelete: propertiesToDelete, + } + + if objWithVec.Additional != nil { + mergeDoc.AdditionalProperties = objWithVec.Additional + } + + // Ensure that the local schema has caught up to the version we used to validate + if err := m.schemaManager.WaitForUpdate(ctx, maxSchemaVersion); err != nil { + return &Error{ + Msg: fmt.Sprintf("error waiting for local schema to catch up to version %d", maxSchemaVersion), + Code: StatusInternalServerError, + Err: err, + } + } + + if err := m.vectorRepo.Merge(ctx, mergeDoc, repl, tenant, maxSchemaVersion); err != nil { + if errors.As(err, &ErrDirtyReadOfDeletedObject{}) || errors.As(err, &ErrDirtyWriteOfDeletedObject{}) { + m.logger.WithError(err).Debugf("object %s/%s not found, possibly due to replication consistency races", cls, id) + 
return &Error{"not found", StatusNotFound, err} + } + return &Error{"repo.merge", StatusInternalServerError, err} + } + + return nil +} + +func (m *Manager) validateInputs(updates *models.Object) error { + if updates == nil { + return fmt.Errorf("empty updates") + } + if updates.Class == "" { + return fmt.Errorf("empty class") + } + if updates.ID == "" { + return fmt.Errorf("empty uuid") + } + return nil +} + +func (m *Manager) mergeObjectSchemaAndVectorize(ctx context.Context, prevPropsSch models.PropertySchema, + nextProps map[string]interface{}, prevVec, nextVec []float32, prevVecs models.Vectors, nextVecs models.Vectors, + id strfmt.UUID, class *models.Class, +) (*models.Object, error) { + var mergedProps map[string]interface{} + + vector := nextVec + vectors := nextVecs + if prevPropsSch == nil { + mergedProps = nextProps + } else { + prevProps, ok := prevPropsSch.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expected previous schema to be map, but got %#v", prevPropsSch) + } + + mergedProps = map[string]interface{}{} + for propName, propValue := range prevProps { + mergedProps[propName] = propValue + } + for propName, propValue := range nextProps { + mergedProps[propName] = propValue + } + } + + // Note: vector could be a nil vector in case a vectorizer is configured, + // then the vectorizer will set it + obj := &models.Object{Class: class.Class, Properties: mergedProps, Vector: vector, Vectors: vectors, ID: id} + if err := m.modulesProvider.UpdateVector(ctx, obj, class, m.findObject, m.logger); err != nil { + return nil, err + } + + // If there is no vectorization module and no updated vector, use the previous vector(s) + if obj.Vector == nil && class.Vectorizer == config.VectorizerModuleNone { + obj.Vector = prevVec + } + + if obj.Vectors == nil { + obj.Vectors = models.Vectors{} + } + + // check for each named vector if the previous vector should be used. 
This should only happen if + // - the vectorizer is none + // - the vector is not set in the update + // - the vector was set in the previous object + for name, vectorConfig := range class.VectorConfig { + if _, ok := vectorConfig.Vectorizer.(map[string]interface{})[config.VectorizerModuleNone]; !ok { + continue + } + + prevTargetVector, ok := prevVecs[name] + if !ok { + continue + } + + if _, ok := obj.Vectors[name]; !ok { + obj.Vectors[name] = prevTargetVector + } + } + + return obj, nil +} + +func (m *Manager) splitPrimitiveAndRefs(in map[string]interface{}, sourceClass string, + sourceID strfmt.UUID, +) (map[string]interface{}, BatchReferences) { + primitive := map[string]interface{}{} + var outRefs BatchReferences + + for prop, value := range in { + refs, ok := value.(models.MultipleRef) + + if !ok { + // this must be a primitive filed + primitive[prop] = value + continue + } + + for _, ref := range refs { + target, _ := crossref.Parse(ref.Beacon.String()) + // safe to ignore error as validation has already been passed + + source := &crossref.RefSource{ + Local: true, + PeerName: "localhost", + Property: schema.PropertyName(prop), + Class: schema.ClassName(sourceClass), + TargetID: sourceID, + } + + outRefs = append(outRefs, BatchReference{From: source, To: target}) + } + } + + return primitive, outRefs +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/merge_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/merge_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0bb3d3cdcad59b34193587cd92dc300eb476a866 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/merge_test.go @@ -0,0 +1,525 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "encoding/json" + "errors" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/mock" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" +) + +type stage int + +const ( + stageInit = iota + // stageInputValidation + stageAuthorization + stageUpdateValidation + stageObjectExists + // stageVectorization + // stageMerge + stageCount +) + +func Test_MergeObject(t *testing.T) { + t.Parallel() + var ( + uuid = strfmt.UUID("dd59815b-142b-4c54-9b12-482434bd54ca") + cls = "ZooAction" + lastTime int64 = 12345 + errAny = errors.New("any error") + ) + + tests := []struct { + name string + // inputs + previous *models.Object + updated *models.Object + vectorizerCalledWith *models.Object + + // outputs + expectedOutput *MergeDocument + wantCode int + + // control return errors + errMerge error + errUpdateObject error + errGetObject error + errExists error + stage + }{ + { + name: "empty class", + previous: nil, + updated: &models.Object{ + ID: uuid, + }, + wantCode: StatusBadRequest, + stage: stageInit, + }, + { + name: "empty uuid", + previous: nil, + updated: &models.Object{ + Class: cls, + }, + wantCode: StatusBadRequest, + stage: stageInit, + }, + { + name: "empty updates", + previous: nil, + wantCode: StatusBadRequest, + stage: stageInit, + }, + { + name: "object not found", + previous: nil, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + wantCode: StatusNotFound, + stage: stageObjectExists, + }, + { + name: "object failure", + previous: nil, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + 
wantCode: StatusInternalServerError, + errGetObject: errAny, + stage: stageObjectExists, + }, + { + name: "cross-ref not found", + previous: nil, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + "hasAnimals": []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/a8ffc82c-9845-4014-876c-11369353c33c", + }, + }, + }, + }, + wantCode: StatusNotFound, + errExists: errAny, + stage: stageAuthorization, + }, + { + name: "merge failure", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + Vectors: map[string]models.Vector{}, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + Vector: []float32{1, 2, 3}, + PrimitiveSchema: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + Vectors: map[string]models.Vector{}, + }, + errMerge: errAny, + wantCode: StatusInternalServerError, + stage: stageCount, + }, + { + name: "vectorization failure", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + errUpdateObject: errAny, + wantCode: StatusInternalServerError, + stage: stageCount, + }, + { + name: "add property", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + }, + updated: &models.Object{ + 
Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + Vector: []float32{1, 2, 3}, + PrimitiveSchema: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "update property", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{"name": "this name"}, + Vector: []float32{0.7, 0.3}, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "another name", + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "another name", + }, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + Vector: []float32{1, 2, 3}, + PrimitiveSchema: map[string]interface{}{ + "name": "another name", + }, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "without properties", + previous: &models.Object{ + Class: cls, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + Vector: []float32{1, 2, 3}, + PrimitiveSchema: map[string]interface{}{}, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "add primitive properties of different types", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", 
+ "area": 3.222, + "employees": json.Number("70"), + "located": map[string]interface{}{ + "latitude": 30.2, + "longitude": 60.2, + }, + "foundedIn": "2002-10-02T15:00:00Z", + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + "area": 3.222, + "employees": int64(70), + "located": &models.GeoCoordinates{ + Latitude: ptFloat32(30.2), + Longitude: ptFloat32(60.2), + }, + "foundedIn": timeMustParse(time.RFC3339, "2002-10-02T15:00:00Z"), + }, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + Vector: []float32{1, 2, 3}, + PrimitiveSchema: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + "area": 3.222, + "employees": float64(70), + "located": &models.GeoCoordinates{ + Latitude: ptFloat32(30.2), + Longitude: ptFloat32(60.2), + }, + "foundedIn": timeMustParse(time.RFC3339, "2002-10-02T15:00:00Z"), + }, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "add primitive and ref properties", + previous: &models.Object{ + Class: cls, + Properties: map[string]interface{}{}, + }, + updated: &models.Object{ + Class: cls, + ID: uuid, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + "hasAnimals": []interface{}{ + map[string]interface{}{ + "beacon": "weaviate://localhost/AnimalAction/a8ffc82c-9845-4014-876c-11369353c33c", + }, + }, + }, + }, + vectorizerCalledWith: &models.Object{ + Class: cls, + Properties: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + }, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: cls, + ID: uuid, + PrimitiveSchema: map[string]interface{}{ + "name": "My little pony zoo with extra sparkles", + }, + Vector: []float32{1, 2, 3}, + References: BatchReferences{ + BatchReference{ + From: 
crossrefMustParseSource("weaviate://localhost/ZooAction/dd59815b-142b-4c54-9b12-482434bd54ca/hasAnimals"), + To: crossrefMustParse("weaviate://localhost/AnimalAction/a8ffc82c-9845-4014-876c-11369353c33c"), + }, + }, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "update vector non-vectorized class", + previous: &models.Object{ + Class: "NotVectorized", + Properties: map[string]interface{}{ + "description": "this description was set initially", + }, + Vector: []float32{0.7, 0.3}, + }, + updated: &models.Object{ + Class: "NotVectorized", + ID: uuid, + Vector: []float32{0.66, 0.22}, + }, + vectorizerCalledWith: nil, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: "NotVectorized", + ID: uuid, + Vector: []float32{0.66, 0.22}, + PrimitiveSchema: map[string]interface{}{}, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + { + name: "do not update vector non-vectorized class", + previous: &models.Object{ + Class: "NotVectorized", + Properties: map[string]interface{}{ + "description": "this description was set initially", + }, + Vector: []float32{0.7, 0.3}, + }, + updated: &models.Object{ + Class: "NotVectorized", + ID: uuid, + Properties: map[string]interface{}{ + "description": "this description was updated", + }, + }, + vectorizerCalledWith: nil, + expectedOutput: &MergeDocument{ + UpdateTime: lastTime, + Class: "NotVectorized", + ID: uuid, + Vector: []float32{0.7, 0.3}, + PrimitiveSchema: map[string]interface{}{ + "description": "this description was updated", + }, + Vectors: map[string]models.Vector{}, + }, + stage: stageCount, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + m := newFakeGetManager(zooAnimalSchemaForTest()) + m.timeSource = fakeTimeSource{} + cls := "" + if tc.updated != nil { + cls = tc.updated.Class + } + if tc.previous != nil { + m.repo.On("Object", cls, uuid, search.SelectProperties(nil), additional.Properties{}, ""). 
+ Return(&search.Result{ + Schema: tc.previous.Properties, + ClassName: tc.previous.Class, + Vector: tc.previous.Vector, + }, nil) + } else if tc.stage >= stageAuthorization { + m.repo.On("Object", cls, uuid, search.SelectProperties(nil), additional.Properties{}, ""). + Return((*search.Result)(nil), tc.errGetObject) + } + + if tc.expectedOutput != nil { + m.repo.On("Merge", *tc.expectedOutput).Return(tc.errMerge) + } + + if tc.vectorizerCalledWith != nil { + if tc.errUpdateObject != nil { + m.modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(nil, tc.errUpdateObject) + } else { + m.modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(tc.expectedOutput.Vector, nil) + } + } + + if tc.expectedOutput != nil && tc.expectedOutput.Vector != nil { + m.modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(tc.expectedOutput.Vector, tc.errUpdateObject) + } + + // called during validation of cross-refs only. 
+ m.repo.On("Exists", mock.Anything, mock.Anything).Maybe().Return(true, tc.errExists) + + err := m.MergeObject(context.Background(), nil, tc.updated, nil) + code := 0 + if err != nil { + code = err.Code + } + if tc.wantCode != code { + t.Fatalf("status code want: %v got: %v", tc.wantCode, code) + } else if code == 0 && err != nil { + t.Fatal(err) + } + + m.repo.AssertExpectations(t) + m.modulesProvider.AssertExpectations(t) + }) + } +} + +func timeMustParse(layout, value string) time.Time { + t, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return t +} + +func crossrefMustParse(in string) *crossref.Ref { + ref, err := crossref.Parse(in) + if err != nil { + panic(err) + } + + return ref +} + +func crossrefMustParseSource(in string) *crossref.RefSource { + ref, err := crossref.ParseSource(in) + if err != nil { + panic(err) + } + + return ref +} + +type fakeTimeSource struct{} + +func (f fakeTimeSource) Now() int64 { + return 12345 +} + +func ptFloat32(in float32) *float32 { + return &in +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/metrics.go b/platform/dbops/binaries/weaviate-src/usecases/objects/metrics.go new file mode 100644 index 0000000000000000000000000000000000000000..cad20d5c5a2212a114d1b93176f9afea7df3d750 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/metrics.go @@ -0,0 +1,210 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaviate/weaviate/usecases/monitoring" +) + +type Metrics struct { + queriesCount *prometheus.GaugeVec + batchTime *prometheus.HistogramVec + dimensions *prometheus.CounterVec + dimensionsCombined prometheus.Counter + groupClasses bool + batchTenants prometheus.Summary + batchObjects prometheus.Summary +} + +func NewMetrics(prom *monitoring.PrometheusMetrics) *Metrics { + if prom == nil { + return nil + } + + return &Metrics{ + queriesCount: prom.QueriesCount, + batchTime: prom.BatchTime, + dimensions: prom.QueryDimensions, + dimensionsCombined: prom.QueryDimensionsCombined, + groupClasses: prom.Group, + batchTenants: prom.BatchSizeTenants, + batchObjects: prom.BatchSizeObjects, + } +} + +func (m *Metrics) queriesInc(queryType string) { + if m == nil { + return + } + + m.queriesCount.With(prometheus.Labels{ + "class_name": "n/a", + "query_type": queryType, + }).Inc() +} + +func (m *Metrics) queriesDec(queryType string) { + if m == nil { + return + } + + m.queriesCount.With(prometheus.Labels{ + "class_name": "n/a", + "query_type": queryType, + }).Dec() +} + +func (m *Metrics) BatchInc() { + m.queriesInc("batch") +} + +func (m *Metrics) BatchDec() { + m.queriesDec("batch") +} + +func (m *Metrics) BatchRefInc() { + m.queriesInc("batch_references") +} + +func (m *Metrics) BatchRefDec() { + m.queriesDec("batch_references") +} + +func (m *Metrics) BatchDeleteInc() { + m.queriesInc("batch_delete") +} + +func (m *Metrics) BatchDeleteDec() { + m.queriesDec("batch_delete") +} + +func (m *Metrics) AddObjectInc() { + m.queriesInc("add_object") +} + +func (m *Metrics) AddObjectDec() { + m.queriesDec("add_object") +} + +func (m *Metrics) UpdateObjectInc() { + m.queriesInc("update_object") +} + +func (m *Metrics) UpdateObjectDec() { + m.queriesDec("update_object") +} + +func (m *Metrics) MergeObjectInc() { + m.queriesInc("merge_object") 
+} + +func (m *Metrics) MergeObjectDec() { + m.queriesDec("merge_object") +} + +func (m *Metrics) DeleteObjectInc() { + m.queriesInc("delete_object") +} + +func (m *Metrics) DeleteObjectDec() { + m.queriesDec("delete_object") +} + +func (m *Metrics) GetObjectInc() { + m.queriesInc("get_object") +} + +func (m *Metrics) GetObjectDec() { + m.queriesDec("get_object") +} + +func (m *Metrics) HeadObjectInc() { + m.queriesInc("head_object") +} + +func (m *Metrics) HeadObjectDec() { + m.queriesDec("head_object") +} + +func (m *Metrics) AddReferenceInc() { + m.queriesInc("add_reference") +} + +func (m *Metrics) AddReferenceDec() { + m.queriesDec("add_reference") +} + +func (m *Metrics) UpdateReferenceInc() { + m.queriesInc("update_reference") +} + +func (m *Metrics) UpdateReferenceDec() { + m.queriesDec("update_reference") +} + +func (m *Metrics) DeleteReferenceInc() { + m.queriesInc("delete_reference") +} + +func (m *Metrics) DeleteReferenceDec() { + m.queriesDec("delete_reference") +} + +func (m *Metrics) BatchOp(op string, startNs int64) { + if m == nil { + return + } + + took := float64(time.Now().UnixNano()-startNs) / float64(time.Millisecond) + + m.batchTime.With(prometheus.Labels{ + "operation": op, + "class_name": "n/a", + "shard_name": "n/a", + }).Observe(float64(took)) +} + +func (m *Metrics) BatchTenants(tenants int) { + if m == nil { + return + } + + m.batchTenants.Observe(float64(tenants)) +} + +func (m *Metrics) BatchObjects(objects int) { + if m == nil { + return + } + + m.batchObjects.Observe(float64(objects)) +} + +func (m *Metrics) AddUsageDimensions(className, queryType, operation string, dims int) { + if m == nil { + return + } + + if m.groupClasses { + className = "n/a" + } + + m.dimensions.With(prometheus.Labels{ + "class_name": className, + "operation": operation, + "query_type": queryType, + }).Add(float64(dims)) + m.dimensionsCombined.Add(float64(dims)) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/models_for_test.go 
b/platform/dbops/binaries/weaviate-src/usecases/objects/models_for_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6c69034c75d88e072cd2ab3710566c890003f390 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/models_for_test.go @@ -0,0 +1,26 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +type FeatureProjection struct { + Vector []float32 `json:"vector"` +} + +type NearestNeighbors struct { + Neighbors []*NearestNeighbor `json:"neighbors"` +} + +type NearestNeighbor struct { + Concept string `json:"concept,omitempty"` + Distance float32 `json:"distance,omitempty"` + Vector []float32 `json:"vector"` +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/query.go b/platform/dbops/binaries/weaviate-src/usecases/objects/query.go new file mode 100644 index 0000000000000000000000000000000000000000..2500b68ee649fefaf87eabed6f4b885a1881c631 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/query.go @@ -0,0 +1,122 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/auth/authorization/filter" +) + +type QueryInput struct { + Class string + Offset int + Limit int + Cursor *filters.Cursor + Filters *filters.LocalFilter + Sort []filters.Sort + Tenant string + Additional additional.Properties +} + +type QueryParams struct { + Class string + Offset *int64 + Limit *int64 + After *string + Sort *string + Order *string + Tenant *string + Additional additional.Properties +} + +func (q *QueryParams) inputs(m *Manager) (*QueryInput, error) { + smartOffset, smartLimit, err := m.localOffsetLimit(q.Offset, q.Limit) + if err != nil { + return nil, err + } + sort := m.getSort(q.Sort, q.Order) + cursor := m.getCursor(q.After, q.Limit) + tenant := "" + if q.Tenant != nil { + tenant = *q.Tenant + } + return &QueryInput{ + Class: q.Class, + Offset: smartOffset, + Limit: smartLimit, + Sort: sort, + Cursor: cursor, + Tenant: tenant, + Additional: q.Additional, + }, nil +} + +func (m *Manager) Query(ctx context.Context, principal *models.Principal, params *QueryParams, +) ([]*models.Object, *Error) { + class := "*" + + if params != nil && params.Class != "" { + params.Class, _ = m.resolveAlias(params.Class) + class = params.Class + } + + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.CollectionsData(class)...); err != nil { + return nil, &Error{err.Error(), StatusForbidden, err} + } + + m.metrics.GetObjectInc() + defer m.metrics.GetObjectDec() + + q, err := params.inputs(m) + if err != nil { + return nil, &Error{"offset or limit", StatusBadRequest, err} + } + + filteredQuery := filter.New[*QueryInput](m.authorizer, m.config.Config.Authorization.Rbac).Filter( + ctx, + 
m.logger, + principal, + []*QueryInput{q}, + authorization.READ, + func(qi *QueryInput) string { + return authorization.CollectionsData(qi.Class)[0] + }, + ) + if len(filteredQuery) == 0 { + err = fmt.Errorf("unauthorized to access collection %s", q.Class) + return nil, &Error{err.Error(), StatusForbidden, err} + } + + res, rerr := m.vectorRepo.Query(ctx, filteredQuery[0]) + if rerr != nil { + return nil, rerr + } + + if m.modulesProvider != nil { + res, err = m.modulesProvider.ListObjectsAdditionalExtend(ctx, res, q.Additional.ModuleParams) + if err != nil { + return nil, &Error{"extend results", StatusInternalServerError, err} + } + } + + if q.Additional.Vector { + m.trackUsageList(res) + } + + return res.ObjectsWithVector(q.Additional.Vector), nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/query_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/query_test.go new file mode 100644 index 0000000000000000000000000000000000000000..28f9e4e45ca63966383353e559cc3cdba066ece7 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/query_test.go @@ -0,0 +1,156 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" +) + +func TestQuery(t *testing.T) { + t.Parallel() + var ( + cls = "MyClass" + m = newFakeGetManager(schema.Schema{}) + errAny = errors.New("any") + ) + params := QueryParams{ + Class: cls, + Limit: ptInt64(10), + } + inputs := QueryInput{ + Class: cls, + Limit: 10, + } + tests := []struct { + class string + name string + param QueryParams + mockedErr *Error + authErr error + wantCode int + mockedDBResponse []search.Result + wantResponse []*models.Object + wantQueryInput QueryInput + wantUsageTracking bool + }{ + { + name: "not found", + class: cls, + param: params, + mockedErr: &Error{Code: StatusNotFound}, + wantCode: StatusNotFound, + wantQueryInput: inputs, + }, + { + name: "forbidden", + class: cls, + param: params, + authErr: errAny, + wantCode: StatusForbidden, + wantQueryInput: inputs, + }, + { + name: "happy path", + class: cls, + param: params, + mockedDBResponse: []search.Result{ + { + ClassName: cls, + Schema: map[string]interface{}{ + "foo": "bar", + }, + Dims: 3, + Dist: 0, + }, + }, + wantResponse: []*models.Object{{ + Class: cls, + VectorWeights: map[string]string(nil), + Properties: map[string]interface{}{ + "foo": "bar", + }, + }}, + wantQueryInput: inputs, + }, + { + name: "happy path with explicit vector requested", + class: cls, + param: QueryParams{ + Class: cls, + Limit: ptInt64(10), + Additional: additional.Properties{Vector: true}, + }, + mockedDBResponse: []search.Result{ + { + ClassName: cls, + Schema: map[string]interface{}{ + "foo": "bar", + }, + Dims: 3, + }, + }, + wantResponse: []*models.Object{{ + Class: cls, + VectorWeights: map[string]string(nil), + Properties: map[string]interface{}{ + 
"foo": "bar", + }, + }}, + wantQueryInput: QueryInput{ + Class: cls, + Limit: 10, + Additional: additional.Properties{Vector: true}, + }, + wantUsageTracking: true, + }, + { + name: "bad request", + class: cls, + param: QueryParams{Class: cls, Offset: ptInt64(1), Limit: &m.config.Config.QueryMaximumResults}, + wantCode: StatusBadRequest, + wantQueryInput: inputs, + }, + } + for i, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + m.authorizer.SetErr(tc.authErr) + if tc.authErr == nil { + m.repo.On("Query", &tc.wantQueryInput).Return(tc.mockedDBResponse, tc.mockedErr).Once() + } + if tc.wantUsageTracking { + m.metrics.On("AddUsageDimensions", cls, "get_rest", "list_include_vector", + tc.mockedDBResponse[0].Dims) + } + res, err := m.Manager.Query(context.Background(), &models.Principal{ + Username: "testuser", + }, &tc.param) + code := 0 + if err != nil { + code = err.Code + } + if tc.wantCode != code { + t.Errorf("case %d expected:%v got:%v", i+1, tc.wantCode, code) + } + + assert.Equal(t, tc.wantResponse, res) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/references.go b/platform/dbops/binaries/weaviate-src/usecases/objects/references.go new file mode 100644 index 0000000000000000000000000000000000000000..f3a13794a0fd0d94677db75af19e2c3d3d71bfbc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/references.go @@ -0,0 +1,76 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" +) + +func (m *Manager) autodetectToClass(class *models.Class, fromProperty string, beaconRef *crossref.Ref) (strfmt.URI, strfmt.URI, bool, *Error) { + // autodetect to class from schema if not part of reference + prop, err := schema.GetPropertyByName(class, schema.LowercaseFirstLetter(fromProperty)) + if err != nil { + return "", "", false, &Error{"cannot get property", StatusInternalServerError, err} + } + if len(prop.DataType) > 1 { + return "", "", false, nil // can't autodetect for multi target + } + + toClass := prop.DataType[0] // datatype is the name of the class that is referenced + toBeacon := crossref.NewLocalhost(toClass, beaconRef.TargetID).String() + + return strfmt.URI(toClass), strfmt.URI(toBeacon), true, nil +} + +func (m *Manager) getAuthorizedFromClass(ctx context.Context, principal *models.Principal, className string) (*models.Class, uint64, versioned.Classes, *Error) { + fetchedClass, err := m.schemaManager.GetCachedClass(ctx, principal, className) + if err != nil { + if errors.As(err, &autherrs.Forbidden{}) { + return nil, 0, nil, &Error{err.Error(), StatusForbidden, err} + } + + return nil, 0, nil, &Error{err.Error(), StatusBadRequest, err} + } + if _, ok := fetchedClass[className]; !ok { + err := fmt.Errorf("collection %q not found in schema", className) + return nil, 0, nil, &Error{"collection not found", StatusBadRequest, err} + } + + return fetchedClass[className].Class, fetchedClass[className].Version, fetchedClass, nil +} + +// validateNames validates class and property names +func validateReferenceName(class, property 
string) error { + if _, err := schema.ValidateClassName(class); err != nil { + return err + } + + if err := schema.ValidateReservedPropertyName(property); err != nil { + return err + } + + if _, err := schema.ValidatePropertyName(property); err != nil { + return err + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/references_add.go b/platform/dbops/binaries/weaviate-src/usecases/objects/references_add.go new file mode 100644 index 0000000000000000000000000000000000000000..6fe3a075e59ff7db00148f2fc55b5795989e57f9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/references_add.go @@ -0,0 +1,199 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" + + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/go-openapi/strfmt" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +// AddObjectReference to an existing object. If the class contains a network +// ref, it has a side-effect on the schema: The schema will be updated to +// include this particular network ref class. 
+func (m *Manager) AddObjectReference(ctx context.Context, principal *models.Principal, + input *AddReferenceInput, repl *additional.ReplicationProperties, tenant string, +) *Error { + m.metrics.AddReferenceInc() + defer m.metrics.AddReferenceDec() + + ctx = classcache.ContextWithClassCache(ctx) + input.Class = schema.UppercaseClassName(input.Class) + input.Class, _ = m.resolveAlias(input.Class) + + if err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.ShardsData(input.Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + + deprecatedEndpoint := input.Class == "" + if deprecatedEndpoint { // for backward compatibility only + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Collections()...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + objectRes, err := m.getObjectFromRepo(ctx, "", input.ID, + additional.Properties{}, nil, tenant) + if err != nil { + errnf := ErrNotFound{} // treated as StatusBadRequest for backward comp + if errors.As(err, &errnf) { + return &Error{"source object deprecated", StatusBadRequest, err} + } else if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"source object deprecated", StatusUnprocessableEntity, err} + } + return &Error{"source object deprecated", StatusInternalServerError, err} + } + input.Class = objectRes.Object().Class + } + + if err := validateReferenceName(input.Class, input.Property); err != nil { + return &Error{err.Error(), StatusBadRequest, err} + } + + class, schemaVersion, fetchedClass, typedErr := m.getAuthorizedFromClass(ctx, principal, input.Class) + if typedErr != nil { + return typedErr + } + + validator := validation.New(m.vectorRepo.Exists, m.config, repl) + targetRef, err := input.validate(validator, class) + if err != nil { + if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"validate inputs", StatusUnprocessableEntity, err} + } + var forbidden autherrs.Forbidden + if 
errors.As(err, &forbidden) { + return &Error{"validate inputs", StatusForbidden, err} + } + + return &Error{"validate inputs", StatusBadRequest, err} + } + + if input.Class != "" && targetRef.Class == "" { + toClass, toBeacon, replace, err := m.autodetectToClass(class, input.Property, targetRef) + if err != nil { + return err + } + if replace { + input.Ref.Class = toClass + input.Ref.Beacon = toBeacon + targetRef.Class = string(toClass) + } + } + + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.ShardsData(targetRef.Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + if err := input.validateExistence(ctx, validator, tenant, targetRef); err != nil { + return &Error{"validate existence", StatusBadRequest, err} + } + + if !deprecatedEndpoint { + ok, err := m.vectorRepo.Exists(ctx, input.Class, input.ID, repl, tenant) + if err != nil { + switch { + case errors.As(err, &ErrMultiTenancy{}): + return &Error{"source object", StatusUnprocessableEntity, err} + default: + return &Error{"source object", StatusInternalServerError, err} + } + } + if !ok { + return &Error{"source object", StatusNotFound, err} + } + } + + source := crossref.NewSource(schema.ClassName(input.Class), + schema.PropertyName(input.Property), input.ID) + + target, err := crossref.ParseSingleRef(&input.Ref) + if err != nil { + return &Error{"parse target ref", StatusBadRequest, err} + } + + if shouldValidateMultiTenantRef(tenant, source, target) { + _, err = validateReferenceMultiTenancy(ctx, principal, + m.schemaManager, m.vectorRepo, source, target, tenant, fetchedClass) + if err != nil { + switch { + case errors.As(err, &autherrs.Forbidden{}): + return &Error{"validation", StatusForbidden, err} + default: + return &Error{"multi-tenancy violation", StatusInternalServerError, err} + } + } + } + + // Ensure that the local schema has caught up to the version we used to validate + if err := m.schemaManager.WaitForUpdate(ctx, 
schemaVersion); err != nil { + return &Error{ + Msg: fmt.Sprintf("error waiting for local schema to catch up to version %d", schemaVersion), + Code: StatusInternalServerError, + Err: err, + } + } + if err := m.vectorRepo.AddReference(ctx, source, target, repl, tenant, schemaVersion); err != nil { + return &Error{"add reference to repo", StatusInternalServerError, err} + } + + if err := m.updateRefVector(ctx, principal, input.Class, input.ID, tenant, class, schemaVersion); err != nil { + return &Error{"update ref vector", StatusInternalServerError, err} + } + + return nil +} + +func shouldValidateMultiTenantRef(tenant string, source *crossref.RefSource, target *crossref.Ref) bool { + return tenant != "" || (source != nil && target != nil && source.Class != "" && target.Class != "") +} + +// AddReferenceInput represents required inputs to add a reference to an existing object. +type AddReferenceInput struct { + // Class name + Class string + // ID of an object + ID strfmt.UUID + // Property name + Property string + // Ref cross reference + Ref models.SingleRef +} + +func (req *AddReferenceInput) validate( + v *validation.Validator, + class *models.Class, +) (*crossref.Ref, error) { + ref, err := v.ValidateSingleRef(&req.Ref) + if err != nil { + return nil, err + } + + return ref, validateReferenceSchema(class, req.Property) +} + +func (req *AddReferenceInput) validateExistence( + ctx context.Context, + v *validation.Validator, tenant string, ref *crossref.Ref, +) error { + return v.ValidateExistence(ctx, ref, "validate reference", tenant) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/references_delete.go b/platform/dbops/binaries/weaviate-src/usecases/objects/references_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..06e13a6520bd0b82aeb911534babf2d86cbb180e --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/references_delete.go @@ -0,0 +1,190 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ 
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + "slices" + + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" +) + +// DeleteReferenceInput represents required inputs to delete a reference from an existing object. +type DeleteReferenceInput struct { + // Class name + Class string + // ID of an object + ID strfmt.UUID + // Property name + Property string + // Reference cross reference + Reference models.SingleRef +} + +func (m *Manager) DeleteObjectReference(ctx context.Context, principal *models.Principal, + input *DeleteReferenceInput, repl *additional.ReplicationProperties, tenant string, +) *Error { + m.metrics.DeleteReferenceInc() + defer m.metrics.DeleteReferenceDec() + + ctx = classcache.ContextWithClassCache(ctx) + input.Class = schema.UppercaseClassName(input.Class) + input.Class, _ = m.resolveAlias(input.Class) + + // We are fetching the existing object and get to know if the UUID exists + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.ShardsData(input.Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + if err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.ShardsData(input.Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + + deprecatedEndpoint := input.Class == "" + // we need to know which collection an object 
belongs to, so for the deprecated case we first need to fetch the + // object from any collection, to then know its collection to check for the correct permissions after wards + if deprecatedEndpoint { + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.CollectionsData()...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + res, err := m.getObjectFromRepo(ctx, input.Class, input.ID, additional.Properties{}, nil, tenant) + if err != nil { + errnf := ErrNotFound{} + if errors.As(err, &errnf) { + return &Error{"source object", StatusNotFound, err} + } else if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"source object", StatusUnprocessableEntity, err} + } + + return &Error{"source object", StatusInternalServerError, err} + } + input.Class = res.ClassName + } + + if err := validateReferenceName(input.Class, input.Property); err != nil { + return &Error{err.Error(), StatusBadRequest, err} + } + + class, schemaVersion, _, typedErr := m.getAuthorizedFromClass(ctx, principal, input.Class) + if typedErr != nil { + return typedErr + } + + res, err := m.getObjectFromRepo(ctx, input.Class, input.ID, additional.Properties{}, nil, tenant) + if err != nil { + errnf := ErrNotFound{} + if errors.As(err, &errnf) { + return &Error{"source object", StatusNotFound, err} + } else if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"source object", StatusUnprocessableEntity, err} + } + + return &Error{"source object", StatusInternalServerError, err} + } + + beacon, err := crossref.Parse(input.Reference.Beacon.String()) + if err != nil { + return &Error{"cannot parse beacon", StatusBadRequest, err} + } + if input.Class != "" && beacon.Class == "" { + toClass, toBeacon, replace, err := m.autodetectToClass(class, input.Property, beacon) + if err != nil { + return err + } + if replace { + input.Reference.Class = toClass + input.Reference.Beacon = toBeacon + } + } + + if err := input.validateSchema(class); err != nil { + if 
deprecatedEndpoint { // for backward comp reasons + return &Error{"bad inputs deprecated", StatusNotFound, err} + } + if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"bad inputs", StatusUnprocessableEntity, err} + } + return &Error{"bad inputs", StatusBadRequest, err} + } + + obj := res.Object() + obj.Tenant = tenant + ok, errmsg := removeReference(obj, input.Property, &input.Reference) + if errmsg != "" { + return &Error{errmsg, StatusInternalServerError, nil} + } + if !ok { + return nil + } + obj.LastUpdateTimeUnix = m.timeSource.Now() + + // Ensure that the local schema has caught up to the version we used to validate + if err := m.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return &Error{ + Msg: fmt.Sprintf("error waiting for local schema to catch up to version %d", schemaVersion), + Code: StatusInternalServerError, + Err: err, + } + } + + vectors, multiVectors, err := dto.GetVectors(res.Vectors) + if err != nil { + return &Error{"repo.putobject", StatusInternalServerError, fmt.Errorf("cannot get vectors: %w", err)} + } + err = m.vectorRepo.PutObject(ctx, obj, res.Vector, vectors, multiVectors, repl, schemaVersion) + if err != nil { + return &Error{"repo.putobject", StatusInternalServerError, err} + } + + if err := m.updateRefVector(ctx, principal, input.Class, input.ID, tenant, class, schemaVersion); err != nil { + return &Error{"update ref vector", StatusInternalServerError, err} + } + + return nil +} + +func (req *DeleteReferenceInput) validateSchema(class *models.Class) error { + return validateReferenceSchema(class, req.Property) +} + +// removeReference removes ref from object obj with property prop. 
+// It returns ok (removal took place) and an error message +func removeReference(obj *models.Object, prop string, remove *models.SingleRef) (ok bool, errmsg string) { + properties := obj.Properties.(map[string]interface{}) + if properties == nil || properties[prop] == nil { + return false, "" + } + + refs, ok := properties[prop].(models.MultipleRef) + if !ok { + return false, fmt.Sprintf("property %s of type %T is not a valid cross-reference", prop, refs) + } + + var removed bool + properties[prop] = slices.DeleteFunc(refs, func(ref *models.SingleRef) bool { + if ref.Beacon == remove.Beacon { + removed = removed || true + return true + } + return false + }) + return removed, "" +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/references_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/references_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b7f9b80d61fa934568d9f729460cf8e6d33d5ed9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/references_test.go @@ -0,0 +1,810 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/vectorindex/hnsw" +) + +func Test_ReferencesAddDeprecated(t *testing.T) { + cls := "Zoo" + id := strfmt.UUID("my-id") + t.Run("without prior refs", func(t *testing.T) { + req := AddReferenceInput{ + ID: id, + Property: "hasAnimals", + Ref: models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7"), + }, + } + m := newFakeGetManager(zooAnimalSchemaForTest()) + m.repo.On("Exists", "Animal", mock.Anything).Return(true, nil) + m.repo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything).Return(&search.Result{ + ClassName: cls, + Schema: map[string]interface{}{ + "name": "MyZoo", + }, + }, nil) + expectedRefProperty := "hasAnimals" + source := crossref.NewSource(schema.ClassName(cls), schema.PropertyName(expectedRefProperty), id) + target := crossref.New("localhost", "Animal", "d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + m.repo.On("AddReference", source, target).Return(nil) + m.modulesProvider.On("UsingRef2Vec", mock.Anything).Return(false) + + err := m.AddObjectReference(context.Background(), nil, &req, nil, "") + require.Nil(t, err) + m.repo.AssertExpectations(t) + }) + t.Run("source object missing", func(t *testing.T) { + req := AddReferenceInput{ + ID: strfmt.UUID("my-id"), + Property: "hasAnimals", + Ref: models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7"), + }, + } + m := newFakeGetManager(zooAnimalSchemaForTest()) + 
m.repo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + err := m.AddObjectReference(context.Background(), nil, &req, nil, "") + require.NotNil(t, err) + if !err.BadRequest() { + t.Errorf("error expected: not found error got: %v", err) + } + }) + t.Run("source object missing", func(t *testing.T) { + req := AddReferenceInput{ + ID: strfmt.UUID("my-id"), + Property: "hasAnimals", + Ref: models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7"), + }, + } + m := newFakeGetManager(zooAnimalSchemaForTest()) + m.repo.On("ObjectByID", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("any")) + err := m.AddObjectReference(context.Background(), nil, &req, nil, "") + require.NotNil(t, err) + if err.Code != StatusInternalServerError { + t.Errorf("error expected: internal error, got: %v", err) + } + }) +} + +func Test_ReferenceAdd(t *testing.T) { + t.Parallel() + var ( + cls = "Zoo" + prop = "hasAnimals" + id = strfmt.UUID("d18c8e5e-000-0000-0000-56b0cfe33ce7") + refID = strfmt.UUID("d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + uri = strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + anyErr = errors.New("any") + ref = models.SingleRef{Beacon: uri} + req = AddReferenceInput{ + Class: cls, + ID: id, + Property: prop, + Ref: ref, + } + source = crossref.NewSource(schema.ClassName(cls), schema.PropertyName(prop), id) + target = crossref.New("localhost", "Animal", refID) + ) + + tests := []struct { + Name string + // inputs + Req AddReferenceInput + + // outputs + ExpectedRef models.SingleRef + WantCode int + WantErr error + SrcNotFound bool + // control errors + ErrAddRef error + ErrTargetExists error + ErrSrcExists error + ErrAuth error + ErrLock error + ErrSchema error + // Stage: 1 -> validation(), 2 -> target exists(), 3 -> source exists(), 4 -> AddReference() + Stage int + }{ + { + Name: "authorization", Req: req, Stage: 0, + WantCode: StatusForbidden, WantErr: anyErr, 
ErrAuth: anyErr, + }, + { + Name: "get schema", + Req: req, Stage: 1, + ErrSchema: anyErr, + WantCode: StatusBadRequest, + }, + { + Name: "empty data type", + Req: AddReferenceInput{Class: cls, ID: id, Property: "emptyType", Ref: ref}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "primitive data type", + Req: AddReferenceInput{Class: cls, ID: id, Property: "name", Ref: ref}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "unknown property", + Req: AddReferenceInput{Class: cls, ID: id, Property: "unknown", Ref: ref}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "valid class name", + Req: AddReferenceInput{Class: "-", ID: id, Property: prop}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "reserved property name", + Req: AddReferenceInput{Class: cls, ID: id, Property: "_id"}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "valid property name", + Req: AddReferenceInput{Class: cls, ID: id, Property: "-"}, Stage: 1, + WantCode: StatusBadRequest, + }, + + {Name: "add valid reference", Req: req, Stage: 4}, + { + Name: "referenced class not found", Req: req, Stage: 2, + WantCode: StatusBadRequest, + ErrTargetExists: anyErr, + WantErr: anyErr, + }, + { + Name: "source object internal error", Req: req, Stage: 3, + WantCode: StatusInternalServerError, + ErrSrcExists: anyErr, + WantErr: anyErr, + }, + { + Name: "source object missing", Req: req, Stage: 3, + WantCode: StatusNotFound, + SrcNotFound: true, + }, + { + Name: "internal error", Req: req, Stage: 4, + WantCode: StatusInternalServerError, + ErrAddRef: anyErr, + WantErr: anyErr, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + m := newFakeGetManager(zooAnimalSchemaForTest()) + m.authorizer.SetErr(tc.ErrAuth) + m.schemaManager.(*fakeSchemaManager).GetschemaErr = tc.ErrSchema + m.modulesProvider.On("UsingRef2Vec", mock.Anything).Return(false) + if tc.Stage >= 2 { + m.repo.On("Exists", "Animal", refID).Return(true, 
tc.ErrTargetExists).Once() + } + if tc.Stage >= 3 { + m.repo.On("Exists", tc.Req.Class, tc.Req.ID).Return(!tc.SrcNotFound, tc.ErrSrcExists).Once() + } + if tc.Stage >= 4 { + m.repo.On("AddReference", source, target).Return(tc.ErrAddRef).Once() + } + + err := m.AddObjectReference(context.Background(), nil, &tc.Req, nil, "") + if tc.WantCode != 0 { + code := 0 + if err != nil { + code = err.Code + } + if code != tc.WantCode { + t.Fatalf("code expected: %v, got %v", tc.WantCode, code) + } + + if tc.WantErr != nil && !errors.Is(err, tc.WantErr) { + t.Errorf("wrapped error expected: %v, got %v", tc.WantErr, err.Err) + } + + } + m.repo.AssertExpectations(t) + }) + } +} + +func Test_ReferenceUpdate(t *testing.T) { + t.Parallel() + var ( + cls = "Zoo" + prop = "hasAnimals" + id = strfmt.UUID("d18c8e5e-000-0000-0000-56b0cfe33ce7") + refID = strfmt.UUID("d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + uri = strfmt.URI("weaviate://localhost/Animals/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + anyErr = errors.New("any") + refs = models.MultipleRef{&models.SingleRef{Beacon: uri, Class: "Animals"}} + req = PutReferenceInput{ + Class: cls, + ID: id, + Property: prop, + Refs: refs, + } + ) + + tests := []struct { + Name string + // inputs + Req PutReferenceInput + + // outputs + ExpectedRef models.SingleRef + WantCode int + WantErr error + SrcNotFound bool + // control errors + ErrPutRefs error + ErrTargetExists error + ErrSrcExists error + ErrAuth error + ErrLock error + ErrSchema error + // Stage: 1 -> validation(), 2 -> target exists(), 3 -> PutObject() + Stage int + }{ + { + Name: "source object internal error", Req: req, + WantCode: StatusInternalServerError, + ErrSrcExists: anyErr, + WantErr: NewErrInternal("repo: object by id: %v", anyErr), + Stage: 1, + }, + { + Name: "source object missing", Req: req, + WantCode: StatusNotFound, + SrcNotFound: true, + Stage: 1, + }, + { + Name: "authorization", Req: req, + WantCode: StatusForbidden, WantErr: anyErr, ErrAuth: anyErr, + Stage: 0, + }, 
+ { + Name: "get schema", + Req: req, Stage: 1, + ErrSchema: anyErr, + WantCode: StatusBadRequest, + }, + { + Name: "empty data type", + Req: PutReferenceInput{Class: cls, ID: id, Property: "emptyType", Refs: refs}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "primitive data type", + Req: PutReferenceInput{Class: cls, ID: id, Property: "name", Refs: refs}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "unknown property", + Req: PutReferenceInput{Class: cls, ID: id, Property: "unknown", Refs: refs}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "reserved property name", + Req: PutReferenceInput{Class: cls, ID: id, Property: "_id", Refs: refs}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "valid property name", + Req: PutReferenceInput{Class: cls, ID: id, Property: "-", Refs: refs}, Stage: 1, + WantCode: StatusBadRequest, + }, + + {Name: "update valid reference", Req: req, Stage: 3}, + { + Name: "referenced class not found", Req: req, Stage: 2, + WantCode: StatusBadRequest, + ErrTargetExists: anyErr, + WantErr: anyErr, + }, + { + Name: "internal error", Req: req, Stage: 3, + WantCode: StatusInternalServerError, + ErrPutRefs: anyErr, + WantErr: anyErr, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + m := newFakeGetManager(zooAnimalSchemaForTest()) + m.authorizer.SetErr(tc.ErrAuth) + m.schemaManager.(*fakeSchemaManager).GetschemaErr = tc.ErrSchema + srcObj := &search.Result{ + ClassName: cls, + Schema: map[string]interface{}{ + "name": "MyZoo", + }, + } + if tc.SrcNotFound { + srcObj = nil + } + if tc.Stage >= 1 { + m.repo.On("Object", cls, id, mock.Anything, mock.Anything, "").Return(srcObj, tc.ErrSrcExists) + } + + if tc.Stage >= 2 { + m.repo.On("Exists", "Animals", refID).Return(true, tc.ErrTargetExists).Once() + } + + if tc.Stage >= 3 { + m.repo.On("PutObject", mock.Anything, mock.Anything).Return(tc.ErrPutRefs).Once() + } + + err := m.UpdateObjectReferences(context.Background(), nil, 
&tc.Req, nil, "") + if tc.WantCode != 0 { + code := 0 + if err != nil { + code = err.Code + } + if code != tc.WantCode { + t.Fatalf("code expected: %v, got %v", tc.WantCode, code) + } + + if tc.WantErr != nil && !errors.Is(err, tc.WantErr) { + t.Errorf("wrapped error expected: %v, got %v", tc.WantErr, err.Err) + } + + } + m.repo.AssertExpectations(t) + }) + } +} + +func Test_ReferenceDelete(t *testing.T) { + t.Parallel() + var ( + cls = "Zoo" + prop = "hasAnimals" + id = strfmt.UUID("d18c8e5e-000-0000-0000-56b0cfe33ce7") + uri = strfmt.URI("weaviate://localhost/Animal/d18c8e5e-a339-4c15-8af6-56b0cfe33ce7") + anyErr = errors.New("any") + ref = models.SingleRef{Beacon: uri} + ref2 = &models.SingleRef{Beacon: strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce5")} + ref3 = &models.SingleRef{Beacon: strfmt.URI("weaviate://localhost/d18c8e5e-a339-4c15-8af6-56b0cfe33ce6")} + req = DeleteReferenceInput{ + Class: cls, + ID: id, + Property: prop, + Reference: ref, + } + ) + + fakeProperties := func(refs ...*models.SingleRef) map[string]interface{} { + mrefs := make(models.MultipleRef, len(refs)) + copy(mrefs, refs) + return map[string]interface{}{ + "name": "MyZoo", + prop: mrefs, + } + } + + tests := []struct { + Name string + // inputs + Req DeleteReferenceInput + properties interface{} + NewSrcRefsLen int + // outputs + ExpectedRef models.SingleRef + WantCode int + WantErr error + SrcNotFound bool + // control errors + ErrPutRefs error + ErrTargetExists error + ErrSrcExists error + ErrAuth error + ErrSchema error + // Stage: 1 -> validation(), 2 -> target exists(), 3 -> PutObject() + Stage int + }{ + { + Name: "source object internal error", Req: req, + WantCode: StatusInternalServerError, + ErrSrcExists: anyErr, + WantErr: NewErrInternal("repo: object by id: %v", anyErr), Stage: 2, + }, + { + Name: "source object missing", Req: req, + WantCode: StatusNotFound, + SrcNotFound: true, Stage: 2, + }, + { + Name: "authorization", Req: req, + WantCode: 
StatusForbidden, WantErr: anyErr, ErrAuth: anyErr, Stage: 1, + }, + { + Name: "get schema", + Req: req, Stage: 1, + ErrSchema: anyErr, + WantCode: StatusBadRequest, + }, + { + Name: "empty data type", + Req: DeleteReferenceInput{Class: cls, ID: id, Property: "emptyType", Reference: ref}, Stage: 2, + WantCode: StatusBadRequest, + }, + { + Name: "primitive data type", + Req: DeleteReferenceInput{Class: cls, ID: id, Property: "name", Reference: ref}, Stage: 2, + WantCode: StatusBadRequest, + }, + { + Name: "unknown property", + Req: DeleteReferenceInput{Class: cls, ID: id, Property: "unknown", Reference: ref}, Stage: 2, + WantCode: StatusBadRequest, + }, + { + Name: "reserved property name", + Req: DeleteReferenceInput{Class: cls, ID: id, Property: "_id"}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "valid property name", + Req: DeleteReferenceInput{Class: cls, ID: id, Property: "-"}, Stage: 1, + WantCode: StatusBadRequest, + }, + { + Name: "delete one reference", + Req: req, + properties: fakeProperties(ref2, &ref, ref3), NewSrcRefsLen: 2, + Stage: 3, + }, + { + Name: "delete two references", + Req: req, + properties: fakeProperties(&ref, ref2, &ref), NewSrcRefsLen: 1, + Stage: 3, + }, + { + Name: "delete all references", + Req: req, + properties: fakeProperties(&ref, &ref), NewSrcRefsLen: 0, + Stage: 3, + }, + { + Name: "reference not found", + Req: req, + properties: fakeProperties(ref2, ref3), NewSrcRefsLen: 2, + Stage: 2, + }, + { + Name: "wrong reference type", + Req: req, + properties: map[string]interface{}{prop: "wrong reference type"}, NewSrcRefsLen: 0, + Stage: 2, + }, + { + Name: "empty properties list", + Req: req, + properties: nil, NewSrcRefsLen: 0, + Stage: 2, + }, + { + Name: "internal error", + Req: req, + properties: fakeProperties(ref2, &ref, ref3), NewSrcRefsLen: 3, + Stage: 3, + WantCode: StatusInternalServerError, + ErrPutRefs: anyErr, + WantErr: anyErr, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + 
m := newFakeGetManager(zooAnimalSchemaForTest()) + m.authorizer.SetErr(tc.ErrAuth) + m.schemaManager.(*fakeSchemaManager).GetschemaErr = tc.ErrSchema + srcObj := &search.Result{ + ClassName: cls, + Schema: tc.properties, + } + if tc.SrcNotFound { + srcObj = nil + } + if tc.Stage >= 2 { + m.repo.On("Object", cls, id, mock.Anything, mock.Anything, "").Return(srcObj, tc.ErrSrcExists) + m.modulesProvider.On("UsingRef2Vec", mock.Anything).Return(false) + } + + if tc.Stage >= 3 { + m.repo.On("PutObject", mock.Anything, mock.Anything).Return(tc.ErrPutRefs).Once() + } + + err := m.DeleteObjectReference(context.Background(), nil, &tc.Req, nil, "") + if tc.WantCode != 0 { + code := 0 + if err != nil { + code = err.Code + } + if code != tc.WantCode { + t.Fatalf("code expected: %v, got %v", tc.WantCode, code) + } + + if tc.WantErr != nil && !errors.Is(err, tc.WantErr) { + t.Errorf("wrapped error expected: %v, got %v", tc.WantErr, err.Err) + } + + } else if tc.properties != nil { + refs, ok := srcObj.Schema.(map[string]interface{})[prop].(models.MultipleRef) + if g, w := len(refs), tc.NewSrcRefsLen; ok && g != w { + t.Errorf("length of source reference after deletion got:%v, want:%v", g, w) + } + + } + + m.repo.AssertExpectations(t) + }) + } +} + +func Test_ReferenceAdd_Ref2Vec(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + m := newFakeGetManager(articleSchemaForTest()) + + req := AddReferenceInput{ + Class: "Article", + ID: strfmt.UUID("e1a60252-c38c-496d-8e54-306e1cedc5c4"), + Property: "hasParagraphs", + Ref: models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/Paragraph/494a2fe5-3e4c-4e9a-a47e-afcd9814f5ea"), + }, + } + + source := crossref.NewSource(schema.ClassName(req.Class), schema.PropertyName(req.Property), req.ID) + target := crossref.New("localhost", "Paragraph", "494a2fe5-3e4c-4e9a-a47e-afcd9814f5ea") + tenant := "randomTenant" + + parent := &search.Result{ + ID: 
strfmt.UUID("e1a60252-c38c-496d-8e54-306e1cedc5c4"), + ClassName: "Article", + Schema: map[string]interface{}{}, + } + + ref1 := &search.Result{ + ID: strfmt.UUID("494a2fe5-3e4c-4e9a-a47e-afcd9814f5ea"), + ClassName: "Paragraph", + Vector: []float32{2, 4, 6}, + } + + m.repo.On("Exists", "Article", parent.ID).Return(true, nil) + m.repo.On("Exists", "Paragraph", ref1.ID).Return(true, nil) + m.repo.On("Object", "Article", parent.ID, search.SelectProperties{}, additional.Properties{}, tenant).Return(parent, nil) + m.repo.On("Object", "Paragraph", ref1.ID, search.SelectProperties{}, additional.Properties{}, tenant).Return(ref1, nil) + m.repo.On("AddReference", source, target).Return(nil) + m.modulesProvider.On("UsingRef2Vec", mock.Anything).Return(true) + m.modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(ref1.Vector, nil) + m.repo.On("PutObject", mock.Anything, ref1.Vector).Return(nil) + err := m.Manager.AddObjectReference(ctx, nil, &req, nil, tenant) + assert.Nil(t, err) +} + +func Test_ReferenceDelete_Ref2Vec(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + m := newFakeGetManager(articleSchemaForTest()) + + req := DeleteReferenceInput{ + Class: "Article", + ID: strfmt.UUID("e1a60252-c38c-496d-8e54-306e1cedc5c4"), + Property: "hasParagraphs", + Reference: models.SingleRef{ + Beacon: strfmt.URI("weaviate://localhost/Paragraph/494a2fe5-3e4c-4e9a-a47e-afcd9814f5ea"), + }, + } + + tenant := "randomTenant" + + parent := &search.Result{ + ID: strfmt.UUID("e1a60252-c38c-496d-8e54-306e1cedc5c4"), + ClassName: "Article", + Schema: map[string]interface{}{}, + } + + ref1 := &search.Result{ + ID: strfmt.UUID("494a2fe5-3e4c-4e9a-a47e-afcd9814f5ea"), + ClassName: "Paragraph", + Vector: []float32{2, 4, 6}, + } + + m.repo.On("Exists", "Article", parent.ID).Return(true, nil) + m.repo.On("Exists", "Paragraph", ref1.ID).Return(true, nil) + m.repo.On("Object", 
req.Class, req.ID, search.SelectProperties{}, additional.Properties{}, tenant).Return(parent, nil) + m.repo.On("PutObject", parent.Object(), []float32(nil)).Return(nil) + m.modulesProvider.On("UsingRef2Vec", mock.Anything).Return(true) + + err := m.Manager.DeleteObjectReference(ctx, nil, &req, nil, tenant) + assert.Nil(t, err) +} + +func articleSchemaForTest() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "Paragraph", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "contents", + DataType: []string{"text"}, + }, + }, + }, + { + Class: "Article", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "title", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "hasParagraphs", + DataType: []string{"Paragraph"}, + }, + }, + Vectorizer: "ref2vec-centroid", + ModuleConfig: map[string]interface{}{ + "ref2vec-centroid": map[string]interface{}{ + "referenceProperties": []string{"hasParagraphs"}, + "method": "mean", + }, + }, + }, + }, + }, + } +} + +func zooAnimalSchemaForTest() schema.Schema { + return schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ZooAction", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "area", + DataType: []string{"number"}, + }, + { + Name: "employees", + DataType: []string{"int"}, + }, + { + Name: "located", + DataType: []string{"geoCoordinates"}, + }, + { + Name: "foundedIn", + DataType: []string{"date"}, + }, + { + Name: "hasAnimals", + DataType: []string{"AnimalAction"}, + }, + }, + }, + { + Class: "AnimalAction", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + 
Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "Zoo", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + { + Name: "area", + DataType: []string{"number"}, + }, + { + Name: "employees", + DataType: []string{"int"}, + }, + { + Name: "located", + DataType: []string{"geoCoordinates"}, + }, + { + Name: "foundedIn", + DataType: []string{"date"}, + }, + { + Name: "hasAnimals", + DataType: []string{"Animal"}, + }, + { + Name: "emptyType", + DataType: []string{""}, + }, + }, + }, + { + Class: "Animal", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "name", + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + }, + }, + }, + { + Class: "NotVectorized", + VectorIndexConfig: hnsw.UserConfig{}, + Properties: []*models.Property{ + { + Name: "description", + DataType: []string{"text"}, + }, + }, + Vectorizer: "none", + }, + }, + }, + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/references_update.go b/platform/dbops/binaries/weaviate-src/usecases/objects/references_update.go new file mode 100644 index 0000000000000000000000000000000000000000..237c08406e39e4fd63cd0eaa096dfccf41a81a12 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/references_update.go @@ -0,0 +1,188 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "fmt" + + "github.com/weaviate/weaviate/usecases/auth/authorization" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/usecases/objects/validation" +) + +// PutReferenceInput represents required inputs to add a reference to an existing object. +type PutReferenceInput struct { + // Class name + Class string + // ID of an object + ID strfmt.UUID + // Property name + Property string + // Ref cross reference + Refs models.MultipleRef +} + +// UpdateObjectReferences of a specific data object. If the class contains a network +// ref, it has a side-effect on the schema: The schema will be updated to +// include this particular network ref class. 
+func (m *Manager) UpdateObjectReferences(ctx context.Context, principal *models.Principal, + input *PutReferenceInput, repl *additional.ReplicationProperties, tenant string, +) *Error { + m.metrics.UpdateReferenceInc() + defer m.metrics.UpdateReferenceDec() + + ctx = classcache.ContextWithClassCache(ctx) + input.Class = schema.UppercaseClassName(input.Class) + input.Class, _ = m.resolveAlias(input.Class) + + if err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.ShardsData(input.Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + + if input.Class == "" { + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Collections()...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + } + + res, err := m.getObjectFromRepo(ctx, input.Class, input.ID, additional.Properties{}, nil, tenant) + if err != nil { + errnf := ErrNotFound{} + if errors.As(err, &errnf) { + if input.Class == "" { // for backward comp reasons + return &Error{"source object deprecated", StatusBadRequest, err} + } + return &Error{"source object", StatusNotFound, err} + } else if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"source object", StatusUnprocessableEntity, err} + } + return &Error{"source object", StatusInternalServerError, err} + } + input.Class = res.ClassName + + if err := validateReferenceName(input.Class, input.Property); err != nil { + return &Error{err.Error(), StatusBadRequest, err} + } + + class, schemaVersion, _, typedErr := m.getAuthorizedFromClass(ctx, principal, input.Class) + if typedErr != nil { + return typedErr + } + + validator := validation.New(m.vectorRepo.Exists, m.config, repl) + parsedTargetRefs, err := input.validate(validator, class) + if err != nil { + if errors.As(err, &ErrMultiTenancy{}) { + return &Error{"bad inputs", StatusUnprocessableEntity, err} + } + return &Error{"bad inputs", StatusBadRequest, err} + } + + previouslyAuthorized := 
map[string]struct{}{} + for i := range input.Refs { + if parsedTargetRefs[i].Class == "" { + toClass, toBeacon, replace, err := m.autodetectToClass(class, input.Property, parsedTargetRefs[i]) + if err != nil { + return err + } + + if replace { + input.Refs[i].Class = toClass + input.Refs[i].Beacon = toBeacon + parsedTargetRefs[i].Class = string(toClass) + } + } + + // only check authZ once per class/tenant combination + checkName := parsedTargetRefs[i].Class + "#" + tenant + if _, ok := previouslyAuthorized[checkName]; !ok { + if err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.ShardsData(parsedTargetRefs[i].Class, tenant)...); err != nil { + return &Error{err.Error(), StatusForbidden, err} + } + previouslyAuthorized[checkName] = struct{}{} + } + + if err := input.validateExistence(ctx, validator, tenant, parsedTargetRefs[i]); err != nil { + return &Error{"validate existence", StatusBadRequest, err} + } + } + + obj := res.Object() + if obj.Properties == nil { + obj.Properties = map[string]interface{}{input.Property: input.Refs} + } else { + obj.Properties.(map[string]interface{})[input.Property] = input.Refs + } + obj.LastUpdateTimeUnix = m.timeSource.Now() + + // Ensure that the local schema has caught up to the version we used to validate + if err := m.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return &Error{ + Msg: fmt.Sprintf("error waiting for local schema to catch up to version %d", schemaVersion), + Code: StatusInternalServerError, + Err: err, + } + } + vectors, multiVectors, err := dto.GetVectors(res.Vectors) + if err != nil { + return &Error{"repo.putobject", StatusInternalServerError, fmt.Errorf("cannot get vectors: %w", err)} + } + err = m.vectorRepo.PutObject(ctx, obj, res.Vector, vectors, multiVectors, repl, schemaVersion) + if err != nil { + return &Error{"repo.putobject", StatusInternalServerError, err} + } + return nil +} + +func (req *PutReferenceInput) validate(v *validation.Validator, class 
*models.Class) ([]*crossref.Ref, error) { + refs, err := v.ValidateMultipleRef(req.Refs) + if err != nil { + return nil, err + } + + return refs, validateReferenceSchema(class, req.Property) +} + +func (req *PutReferenceInput) validateExistence( + ctx context.Context, + v *validation.Validator, tenant string, ref *crossref.Ref, +) error { + return v.ValidateExistence(ctx, ref, "validate reference", tenant) +} + +func validateReferenceSchema(c *models.Class, property string) error { + prop, err := schema.GetPropertyByName(c, property) + if err != nil { + return err + } + + classGetterFunc := func(string) *models.Class { return c } + dt, err := schema.FindPropertyDataTypeWithRefs(classGetterFunc, prop.DataType, false, "") + if err != nil { + return err + } + + if !dt.IsReference() { + return fmt.Errorf("property '%s' is not a reference-type", property) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/replication.go b/platform/dbops/binaries/weaviate-src/usecases/objects/replication.go new file mode 100644 index 0000000000000000000000000000000000000000..7bd3725347e9b83c6c182c9fd74b1b56351be9b1 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/replication.go @@ -0,0 +1,113 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "encoding/json" + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/models" +) + +// VObject is a versioned object for detecting replication inconsistencies +type VObject struct { + // ID of the Object. + // Format: uuid + ID strfmt.UUID `json:"id,omitempty"` + + Deleted bool `json:"deleted"` + + // Timestamp of the last Object update in milliseconds since epoch UTC. 
+ LastUpdateTimeUnixMilli int64 `json:"lastUpdateTimeUnixMilli,omitempty"` + + // LatestObject is to most up-to-date version of an object + LatestObject *models.Object `json:"object,omitempty"` + + Vector []float32 `json:"vector"` + Vectors map[string][]float32 `json:"vectors"` + MultiVectors map[string][][]float32 `json:"multiVectors"` + + // StaleUpdateTime is the LastUpdateTimeUnix of the stale object sent to the coordinator + StaleUpdateTime int64 `json:"updateTime,omitempty"` + + // Version is the most recent incremental version number of the object + Version uint64 `json:"version"` +} + +// vobjectMarshaler is a helper for the methods implementing encoding.BinaryMarshaler +// +// Because models.Object has an optimized custom MarshalBinary method, that is what +// we want to use when serializing, rather than json.Marshal. This is just a thin +// wrapper around the model bytes resulting from the underlying call to MarshalBinary +type vobjectMarshaler struct { + ID strfmt.UUID + Deleted bool + LastUpdateTimeUnixMilli int64 + StaleUpdateTime int64 + Version uint64 + Vector []float32 + Vectors map[string][]float32 + MultiVectors map[string][][]float32 + LatestObject []byte +} + +func (vo *VObject) MarshalBinary() ([]byte, error) { + b := vobjectMarshaler{ + ID: vo.ID, + Deleted: vo.Deleted, + LastUpdateTimeUnixMilli: vo.LastUpdateTimeUnixMilli, + StaleUpdateTime: vo.StaleUpdateTime, + Vector: vo.Vector, + Vectors: vo.Vectors, + MultiVectors: vo.MultiVectors, + Version: vo.Version, + } + if vo.LatestObject != nil { + obj, err := vo.LatestObject.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("marshal object: %w", err) + } + b.LatestObject = obj + } + + return json.Marshal(b) +} + +func (vo *VObject) UnmarshalBinary(data []byte) error { + var b vobjectMarshaler + + err := json.Unmarshal(data, &b) + if err != nil { + return err + } + + vo.ID = b.ID + vo.Deleted = b.Deleted + vo.LastUpdateTimeUnixMilli = b.LastUpdateTimeUnixMilli + vo.StaleUpdateTime = 
b.StaleUpdateTime + vo.Vector = b.Vector + vo.Vectors = b.Vectors + vo.MultiVectors = b.MultiVectors + vo.Version = b.Version + + if b.LatestObject != nil { + var obj models.Object + err = obj.UnmarshalBinary(b.LatestObject) + if err != nil { + return fmt.Errorf("unmarshal object: %w", err) + } + vo.LatestObject = &obj + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/update.go b/platform/dbops/binaries/weaviate-src/usecases/objects/update.go new file mode 100644 index 0000000000000000000000000000000000000000..4fa17f2a74c00edf661d8b3feff0c3286282586f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/update.go @@ -0,0 +1,130 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/versioned" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/classcache" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + "github.com/weaviate/weaviate/usecases/memwatch" +) + +// UpdateObject updates object of class. +// If the class contains a network ref, it has a side effect on the schema: The schema will be updated to +// include this particular network ref class. 
+func (m *Manager) UpdateObject(ctx context.Context, principal *models.Principal, + class string, id strfmt.UUID, updates *models.Object, + repl *additional.ReplicationProperties, +) (*models.Object, error) { + className := schema.UppercaseClassName(updates.Class) + className, _ = m.resolveAlias(className) + updates.Class = className + + if err := m.authorizer.Authorize(ctx, principal, authorization.UPDATE, authorization.Objects(updates.Class, updates.Tenant, updates.ID)); err != nil { + return nil, err + } + + ctx = classcache.ContextWithClassCache(ctx) + // we don't reveal any info that the end users cannot get through the structure of the data anyway + fetchedClasses, err := m.schemaManager.GetCachedClassNoAuth(ctx, className) + if err != nil { + return nil, err + } + + m.metrics.UpdateObjectInc() + defer m.metrics.UpdateObjectDec() + + if err := m.allocChecker.CheckAlloc(memwatch.EstimateObjectMemory(updates)); err != nil { + m.logger.WithError(err).Errorf("memory pressure: cannot process update object") + return nil, fmt.Errorf("cannot process update object: %w", err) + } + + return m.updateObjectToConnectorAndSchema(ctx, principal, class, id, updates, repl, fetchedClasses) +} + +func (m *Manager) updateObjectToConnectorAndSchema(ctx context.Context, + principal *models.Principal, className string, id strfmt.UUID, updates *models.Object, + repl *additional.ReplicationProperties, fetchedClasses map[string]versioned.Class, +) (*models.Object, error) { + if cls := m.schemaManager.ResolveAlias(className); cls != "" { + className = cls + } + + if id != updates.ID { + return nil, NewErrInvalidUserInput("invalid update: field 'id' is immutable") + } + + obj, err := m.getObjectFromRepo(ctx, className, id, additional.Properties{}, repl, updates.Tenant) + if err != nil { + return nil, err + } + + maxSchemaVersion := fetchedClasses[className].Version + schemaVersion, err := m.autoSchemaManager.autoSchema(ctx, principal, false, fetchedClasses, updates) + if err != nil { + 
return nil, NewErrInvalidUserInput("invalid object: %v", err) + } + if schemaVersion > maxSchemaVersion { + maxSchemaVersion = schemaVersion + } + + m.logger. + WithField("object", "kinds_update_requested"). + WithField("original", obj). + WithField("updated", updates). + WithField("id", id). + Debug("received update kind request") + + class := fetchedClasses[className].Class + + prevObj := obj.Object() + err = m.validateObjectAndNormalizeNames(ctx, repl, updates, prevObj, fetchedClasses) + if err != nil { + return nil, NewErrInvalidUserInput("invalid object: %v", err) + } + + // Set the original creation timestamp before call to put, + // otherwise it is lost. This is because `class` is unmarshalled + // directly from the request body, therefore `CreationTimeUnix` + // inherits the zero value. + updates.CreationTimeUnix = obj.Created + updates.LastUpdateTimeUnix = m.timeSource.Now() + + err = m.modulesProvider.UpdateVector(ctx, updates, class, m.findObject, m.logger) + if err != nil { + return nil, NewErrInternal("update object: %v", err) + } + + if err := m.schemaManager.WaitForUpdate(ctx, maxSchemaVersion); err != nil { + return nil, fmt.Errorf("error waiting for local schema to catch up to version %d: %w", maxSchemaVersion, err) + } + + vectors, multiVectors, err := dto.GetVectors(updates.Vectors) + if err != nil { + return nil, fmt.Errorf("put object: cannot get vectors: %w", err) + } + err = m.vectorRepo.PutObject(ctx, updates, updates.Vector, vectors, multiVectors, repl, maxSchemaVersion) + if err != nil { + return nil, fmt.Errorf("put object: %w", err) + } + + return updates, nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/update_test.go b/platform/dbops/binaries/weaviate-src/usecases/objects/update_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d813a395d846b5e2c283d2e9f36d69b653eecfd --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/update_test.go @@ -0,0 +1,190 @@ +// _ _ +// 
__ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema" + "github.com/weaviate/weaviate/entities/search" + enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw" + "github.com/weaviate/weaviate/usecases/auth/authorization/mocks" + "github.com/weaviate/weaviate/usecases/config" +) + +func Test_UpdateAction(t *testing.T) { + var ( + db *fakeVectorRepo + modulesProvider *fakeModulesProvider + manager *Manager + extender *fakeExtender + projectorFake *fakeProjector + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: []*models.Class{ + { + Class: "ActionClass", + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + Name: "foo", + }, + }, + }, + }, + }, + } + + reset := func() { + db = &fakeVectorRepo{} + schemaManager := &fakeSchemaManager{ + GetSchemaResponse: schema, + } + cfg := &config.WeaviateConfig{} + cfg.Config.QueryDefaults.Limit = 20 + cfg.Config.QueryMaximumResults = 200 + authorizer := mocks.NewMockAuthorizer() + logger, _ := test.NewNullLogger() + extender = &fakeExtender{} + projectorFake = &fakeProjector{} + metrics := &fakeMetrics{} + modulesProvider = getFakeModulesProviderWithCustomExtenders(extender, projectorFake) + manager = NewManager(schemaManager, cfg, + 
logger, authorizer, db, modulesProvider, metrics, nil, + NewAutoSchemaManager(schemaManager, db, cfg, authorizer, logger, prometheus.NewPedanticRegistry())) + } + + t.Run("ensure creation timestamp persists", func(t *testing.T) { + reset() + + beforeUpdate := time.Now().UnixNano() / int64(time.Millisecond) + id := strfmt.UUID("34e9df15-0c3b-468d-ab99-f929662834c7") + vec := []float32{0, 1, 2} + + result := &search.Result{ + ID: id, + ClassName: "ActionClass", + Schema: map[string]interface{}{"foo": "bar"}, + Created: beforeUpdate, + Updated: beforeUpdate, + } + db.On("ObjectByID", id, mock.Anything, mock.Anything).Return(result, nil).Once() + modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). + Return(vec, nil) + db.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + + payload := &models.Object{ + Class: "ActionClass", + ID: id, + Properties: map[string]interface{}{"foo": "baz"}, + } + res, err := manager.UpdateObject(context.Background(), &models.Principal{}, "", id, payload, nil) + require.Nil(t, err) + expected := &models.Object{ + Class: "ActionClass", + ID: id, + Properties: map[string]interface{}{"foo": "baz"}, + CreationTimeUnix: beforeUpdate, + } + + afterUpdate := time.Now().UnixNano() / int64(time.Millisecond) + + assert.Equal(t, expected.Class, res.Class) + assert.Equal(t, expected.ID, res.ID) + assert.Equal(t, expected.Properties, res.Properties) + assert.Equal(t, expected.CreationTimeUnix, res.CreationTimeUnix) + assert.GreaterOrEqual(t, res.LastUpdateTimeUnix, beforeUpdate) + assert.LessOrEqual(t, res.LastUpdateTimeUnix, afterUpdate) + }) +} + +func Test_UpdateObject(t *testing.T) { + var ( + cls = "MyClass" + id = strfmt.UUID("34e9df15-0c3b-468d-ab99-f929662834c7") + beforeUpdate = (time.Now().UnixNano() - 2*int64(time.Millisecond)) / int64(time.Millisecond) + vec = []float32{0, 1, 2} + anyErr = errors.New("any error") + ) + + schema := schema.Schema{ + Objects: &models.Schema{ + Classes: 
[]*models.Class{ + { + Class: cls, + VectorIndexConfig: enthnsw.NewDefaultUserConfig(), + Properties: []*models.Property{ + { + DataType: schema.DataTypeText.PropString(), + Tokenization: models.PropertyTokenizationWhitespace, + Name: "foo", + }, + }, + }, + }, + }, + } + + m := newFakeGetManager(schema) + payload := &models.Object{ + Class: cls, + ID: id, + Properties: map[string]interface{}{"foo": "baz"}, + } + // the object might not exist + m.repo.On("Object", cls, id, mock.Anything, mock.Anything, "").Return(nil, anyErr).Once() + _, err := m.UpdateObject(context.Background(), &models.Principal{}, cls, id, payload, nil) + if err == nil { + t.Fatalf("must return an error if object() fails") + } + + result := &search.Result{ + ID: id, + ClassName: cls, + Schema: map[string]interface{}{"foo": "bar"}, + Created: beforeUpdate, + Updated: beforeUpdate, + } + m.repo.On("Object", cls, id, mock.Anything, mock.Anything, "").Return(result, nil).Once() + m.modulesProvider.On("UpdateVector", mock.Anything, mock.AnythingOfType(FindObjectFn)). 
+ Return(vec, nil) + m.repo.On("PutObject", mock.Anything, mock.Anything).Return(nil).Once() + + expected := &models.Object{ + Class: cls, + ID: id, + Properties: map[string]interface{}{"foo": "baz"}, + CreationTimeUnix: beforeUpdate, + Vector: vec, + } + res, err := m.UpdateObject(context.Background(), &models.Principal{}, cls, id, payload, nil) + require.Nil(t, err) + if res.LastUpdateTimeUnix <= beforeUpdate { + t.Error("time after update must be greater than time before update ") + } + res.LastUpdateTimeUnix = 0 // to allow for equality + assert.Equal(t, expected, res) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/validate.go b/platform/dbops/binaries/weaviate-src/usecases/objects/validate.go new file mode 100644 index 0000000000000000000000000000000000000000..d6c63b92d0ffecef4883815b62a78cdd5722e055 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/validate.go @@ -0,0 +1,60 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "errors" + + "github.com/weaviate/weaviate/entities/classcache" + + "github.com/weaviate/weaviate/entities/schema" + + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/usecases/auth/authorization" + autherrs "github.com/weaviate/weaviate/usecases/auth/authorization/errors" +) + +// ValidateObject without adding it to the database. 
Can be used in UIs for +// async validation before submitting +func (m *Manager) ValidateObject(ctx context.Context, principal *models.Principal, + obj *models.Object, repl *additional.ReplicationProperties, +) error { + className := schema.UppercaseClassName(obj.Class) + className, _ = m.resolveAlias(className) + obj.Class = className + + err := m.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Objects(className, obj.Tenant, obj.ID)) + if err != nil { + return err + } + + ctx = classcache.ContextWithClassCache(ctx) + + // we don't reveal any info that the end users cannot get through the structure of the data anyway + fetchedClasses, err := m.schemaManager.GetCachedClassNoAuth(ctx, className) + if err != nil { + return err + } + + err = m.validateObjectAndNormalizeNames(ctx, repl, obj, nil, fetchedClasses) + if err != nil { + var forbidden autherrs.Forbidden + if errors.As(err, &forbidden) { + return err + } + return NewErrInvalidUserInput("invalid object: %v", err) + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/objects/vector.go b/platform/dbops/binaries/weaviate-src/usecases/objects/vector.go new file mode 100644 index 0000000000000000000000000000000000000000..d0a55072f7055e3fc4ce9d76c8a48f6c043a8500 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/objects/vector.go @@ -0,0 +1,95 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package objects + +import ( + "context" + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/dto" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" +) + +func (m *Manager) updateRefVector(ctx context.Context, principal *models.Principal, + className string, id strfmt.UUID, tenant string, class *models.Class, schemaVersion uint64, +) error { + if m.modulesProvider.UsingRef2Vec(className) { + parent, err := m.vectorRepo.Object(ctx, className, id, + search.SelectProperties{}, additional.Properties{}, nil, tenant) + if err != nil { + return fmt.Errorf("find parent '%s/%s': %w", + className, id, err) + } + + obj := parent.Object() + + class, err := m.schemaManager.GetClass(ctx, principal, className) + if err != nil { + return err + } + + if err := m.modulesProvider.UpdateVector( + ctx, obj, class, m.findObject, m.logger); err != nil { + return fmt.Errorf("calculate ref vector for '%s/%s': %w", + className, id, err) + } + + if err := m.schemaManager.WaitForUpdate(ctx, schemaVersion); err != nil { + return fmt.Errorf("error waiting for local schema to catch up to version %d: %w", schemaVersion, err) + } + + vectors, multiVectors, err := dto.GetVectors(obj.Vectors) + if err != nil { + return fmt.Errorf("put object: cannot get vectors: %w", err) + } + + if err := m.vectorRepo.PutObject(ctx, obj, obj.Vector, vectors, multiVectors, nil, schemaVersion); err != nil { + return fmt.Errorf("put object: %w", err) + } + + return nil + } + + // nothing to do + return nil +} + +// TODO: remove this method and just pass m.vectorRepo.Object to +// m.modulesProvider.UpdateVector when m.vectorRepo.ObjectByID +// is finally removed +func (m *Manager) findObject(ctx context.Context, class string, + id strfmt.UUID, props search.SelectProperties, addl additional.Properties, + tenant string, +) (*search.Result, error) { + 
// to support backwards compat + if class == "" { + return m.vectorRepo.ObjectByID(ctx, id, props, addl, tenant) + } + return m.vectorRepo.Object(ctx, class, id, props, addl, nil, tenant) +} + +// TODO: remove this method and just pass b.vectorRepo.Object to +// b.modulesProvider.UpdateVector when b.vectorRepo.ObjectByID +// is finally removed +func (b *BatchManager) findObject(ctx context.Context, class string, + id strfmt.UUID, props search.SelectProperties, addl additional.Properties, + tenant string, +) (*search.Result, error) { + // to support backwards compat + if class == "" { + return b.vectorRepo.ObjectByID(ctx, id, props, addl, tenant) + } + return b.vectorRepo.Object(ctx, class, id, props, addl, nil, tenant) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter.go b/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter.go new file mode 100644 index 0000000000000000000000000000000000000000..bca1313f424628b4cf0732b83ef55a328ccbd33f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter.go @@ -0,0 +1,60 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package ratelimiter + +import "sync/atomic" + +// Limiter is a thread-safe counter that can be used for rate-limiting requests +type Limiter struct { + max int64 + current int64 +} + +// New creates a [Limiter] with the specified maximum concurrent requests +func New(maxRequests int) *Limiter { + return &Limiter{ + max: int64(maxRequests), + } +} + +// If there is still room, TryInc, increases the counter and returns true. 
If +// there are too many concurrent requests it does not increase the counter and +// returns false +func (l *Limiter) TryInc() bool { + if l.max <= 0 { + return true + } + + new := atomic.AddInt64(&l.current, 1) + + if new <= l.max { + return true + } + + // undo unsuccessful increment + atomic.AddInt64(&l.current, -1) + return false +} + +func (l *Limiter) Dec() { + if l.max <= 0 { + return + } + + new := atomic.AddInt64(&l.current, -1) + if new < 0 { + // Should not happen unless some client called Dec multiple times. + // Try to reset current to 0. It's ok if swap doesn't happen, since + // someone else must've succeeded at fixing current value. + atomic.CompareAndSwapInt64(&l.current, new, 0) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter_test.go b/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c77ebd76d2106a8101dffe8619fabc82951c8a0a --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/ratelimiter/limiter_test.go @@ -0,0 +1,112 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package ratelimiter + +import ( + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestLimiter(t *testing.T) { + l := New(3) + + // add 3 requests, should all work + assert.True(t, l.TryInc()) + assert.True(t, l.TryInc()) + assert.True(t, l.TryInc()) + + // try to add one more, should fail + assert.False(t, l.TryInc()) + + // decrease and try again + l.Dec() + l.Dec() + assert.True(t, l.TryInc()) + assert.True(t, l.TryInc()) + assert.False(t, l.TryInc()) +} + +func TestLimiterConcurrently(t *testing.T) { + var count int + lock := &sync.Mutex{} + + l := New(30) + + request := func() { + lock.Lock() + count++ + if count > 30 { + t.Fail() + } + lock.Unlock() + + time.Sleep(30 * time.Millisecond) + + lock.Lock() + count-- + lock.Unlock() + } + + wg := sync.WaitGroup{} + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(time.Duration(rand.Intn(200)) * time.Millisecond) + if l.TryInc() { + request() + l.Dec() + } + }() + } + + wg.Wait() +} + +func TestLimiterUnlimited(t *testing.T) { + l := New(-1) + + for i := 0; i < 1000; i++ { + assert.True(t, l.TryInc()) + } + + for i := 0; i < 1000; i++ { + l.Dec() + } + + assert.True(t, l.TryInc()) +} + +func TestLimiterCantGoNegative(t *testing.T) { + l := New(3) + + for i := 0; i < 10; i++ { + l.Dec() + } + + for i := 0; i < 3; i++ { + assert.True(t, l.TryInc()) + } + assert.False(t, l.TryInc()) +} + +func BenchmarkLimiter(b *testing.B) { + l := New(-1) + for i := 0; i < b.N; i++ { + l.TryInc() + l.Dec() + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/batch.go b/platform/dbops/binaries/weaviate-src/usecases/replica/batch.go new file mode 100644 index 0000000000000000000000000000000000000000..8a8d32942067abb5f3e535ae3d41414479b86ab8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/batch.go @@ -0,0 +1,98 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ 
/ / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "sort" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/storobj" +) + +// IndexedBatch holds an indexed list of objects +type IndexedBatch struct { + Data []*storobj.Object + // Index is z-index used to maintain object's order + Index []int +} + +// createBatch creates IndexedBatch from xs +func createBatch(xs []*storobj.Object) IndexedBatch { + var bi IndexedBatch + bi.Data = xs + bi.Index = make([]int, len(xs)) + for i := 0; i < len(xs); i++ { + bi.Index[i] = i + } + return bi +} + +// cluster data object by shard +func cluster(bi IndexedBatch) []ShardPart { + index := bi.Index + data := bi.Data + sort.Slice(index, func(i, j int) bool { + return data[index[i]].BelongsToShard < data[index[j]].BelongsToShard + }) + clusters := make([]ShardPart, 0, 16) + // partition + cur := data[index[0]] + j := 0 + for i := 1; i < len(index); i++ { + if data[index[i]].BelongsToShard == cur.BelongsToShard { + continue + } + clusters = append(clusters, ShardPart{ + Shard: cur.BelongsToShard, + Node: cur.BelongsToNode, Data: data, + Index: index[j:i], + }) + j = i + cur = data[index[j]] + + } + clusters = append(clusters, ShardPart{ + Shard: cur.BelongsToShard, + Node: cur.BelongsToNode, Data: data, + Index: index[j:], + }) + return clusters +} + +// ShardPart represents a data partition belonging to a physical shard +type ShardPart struct { + Shard string // one-to-one mapping between Shard and Node + Node string + + Data []*storobj.Object + Index []int // index for data +} + +func (b *ShardPart) ObjectIDs() []strfmt.UUID { + xs := make([]strfmt.UUID, len(b.Index)) + for i, idx := range b.Index { + xs[i] = b.Data[idx].ID() + } + return xs +} + +func (b *ShardPart) Extract() ([]Replica, 
[]strfmt.UUID) { + xs := make([]Replica, len(b.Index)) + ys := make([]strfmt.UUID, len(b.Index)) + + for i, idx := range b.Index { + p := b.Data[idx] + xs[i] = Replica{ID: p.ID(), Deleted: false, Object: p} + ys[i] = p.ID() + } + return xs, ys +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/batch_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/batch_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b7ec9abb71ebc9671526a607b97144e546bbafa5 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/batch_test.go @@ -0,0 +1,149 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "sort" + "strconv" + "testing" + + "github.com/weaviate/weaviate/usecases/replica" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" +) + +// createBatch creates IndexedBatch from xs +func createBatch(xs []*storobj.Object) replica.IndexedBatch { + var bi replica.IndexedBatch + bi.Data = xs + bi.Index = make([]int, len(xs)) + for i := 0; i < len(xs); i++ { + bi.Index[i] = i + } + return bi +} + +// cluster data object by shard +func cluster(bi replica.IndexedBatch) []replica.ShardPart { + index := bi.Index + data := bi.Data + sort.Slice(index, func(i, j int) bool { + return data[index[i]].BelongsToShard < data[index[j]].BelongsToShard + }) + clusters := make([]replica.ShardPart, 0, 16) + // partition + cur := data[index[0]] + j := 0 + for i := 1; i < len(index); i++ { + if data[index[i]].BelongsToShard == cur.BelongsToShard { + continue + } + clusters = append(clusters, 
replica.ShardPart{ + Shard: cur.BelongsToShard, + Node: cur.BelongsToNode, Data: data, + Index: index[j:i], + }) + j = i + cur = data[index[j]] + + } + clusters = append(clusters, replica.ShardPart{ + Shard: cur.BelongsToShard, + Node: cur.BelongsToNode, Data: data, + Index: index[j:], + }) + return clusters +} + +func TestBatchInput(t *testing.T) { + var ( + N = 9 + ids = make([]strfmt.UUID, N) + data = make([]*storobj.Object, N) + ) + for i := 0; i < N; i++ { + uuid := strfmt.UUID(strconv.Itoa(i)) + ids[i] = uuid + data[i] = objectEx(uuid, 1, "S1", "N1") + } + parts := cluster(createBatch(data)) + assert.Len(t, parts, 1) + assert.Equal(t, parts[0], replica.ShardPart{ + Shard: "S1", + Node: "N1", + Data: data, + Index: []int{0, 1, 2, 3, 4, 5, 6, 7, 8}, + }) + assert.Equal(t, parts[0].ObjectIDs(), ids) + + data[0].BelongsToShard = "S2" + data[0].BelongsToNode = "N2" + data[2].BelongsToShard = "S2" + data[2].BelongsToNode = "N2" + data[3].BelongsToShard = "S2" + data[4].BelongsToNode = "N2" + data[5].BelongsToShard = "S2" + data[5].BelongsToNode = "N2" + + parts = cluster(createBatch(data)) + sort.Slice(parts, func(i, j int) bool { return len(parts[i].Index) < len(parts[j].Index) }) + assert.Len(t, parts, 2) + assert.ElementsMatch(t, parts[0].ObjectIDs(), []strfmt.UUID{ids[0], ids[2], ids[3], ids[5]}) + assert.Equal(t, parts[0].Shard, "S2") + assert.Equal(t, parts[0].Node, "N2") + + assert.ElementsMatch(t, parts[1].ObjectIDs(), []strfmt.UUID{ids[1], ids[4], ids[6], ids[7], ids[8]}) + assert.Equal(t, parts[1].Shard, "S1") + assert.Equal(t, parts[1].Node, "N1") +} + +func genInputs(node, shard string, updateTime int64, ids []strfmt.UUID) ([]*storobj.Object, []types.RepairResponse) { + xs := make([]*storobj.Object, len(ids)) + digestR := make([]types.RepairResponse, len(ids)) + for i, id := range ids { + xs[i] = &storobj.Object{ + Object: models.Object{ + ID: id, + LastUpdateTimeUnix: updateTime, + }, + BelongsToShard: shard, + BelongsToNode: node, + } + digestR[i] = 
types.RepairResponse{ID: ids[i].String(), UpdateTime: updateTime} + } + return xs, digestR +} + +func setObjectsConsistency(xs []*storobj.Object, isConsistent bool) []*storobj.Object { + want := make([]*storobj.Object, len(xs)) + for i, x := range xs { + cp := *x + cp.IsConsistent = isConsistent + want[i] = &cp + } + return want +} + +func objectEx(id strfmt.UUID, lastTime int64, shard, node string) *storobj.Object { + return &storobj.Object{ + Object: models.Object{ + ID: id, + LastUpdateTimeUnix: lastTime, + }, + BelongsToShard: shard, + BelongsToNode: node, + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/config.go b/platform/dbops/binaries/weaviate-src/usecases/replica/config.go new file mode 100644 index 0000000000000000000000000000000000000000..0cb72c504257fa85528677817e23a279a959d923 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/config.go @@ -0,0 +1,70 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "fmt" + + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/replication" +) + +type nodeCounter interface { + NodeCount() int +} + +func ValidateConfig(class *models.Class, globalCfg replication.GlobalConfig) error { + if class.ReplicationConfig == nil { + class.ReplicationConfig = &models.ReplicationConfig{ + Factor: int64(globalCfg.MinimumFactor), + DeletionStrategy: globalCfg.DeletionStrategy, + } + return nil + } + + if class.ReplicationConfig.Factor > 0 && class.ReplicationConfig.Factor < int64(globalCfg.MinimumFactor) { + return fmt.Errorf("invalid replication factor: setup requires a minimum replication factor of %d: got %d", + globalCfg.MinimumFactor, class.ReplicationConfig.Factor) + } + + if class.ReplicationConfig.Factor < 1 { + class.ReplicationConfig.Factor = int64(globalCfg.MinimumFactor) + } + + if globalCfg.DeletionStrategy != "" { + class.ReplicationConfig.DeletionStrategy = globalCfg.DeletionStrategy + } + + return nil +} + +func ValidateConfigUpdate(old, updated *models.Class, nodeCounter nodeCounter) error { + // This is not possible if schema is being updated via by a client. + // But for a test object that wasn't created by a client, it is. 
+ if old.ReplicationConfig == nil || old.ReplicationConfig.Factor == 0 { + old.ReplicationConfig = &models.ReplicationConfig{Factor: 1} + } + + if updated.ReplicationConfig == nil { + updated.ReplicationConfig = &models.ReplicationConfig{Factor: 1} + } + + if old.ReplicationConfig.Factor != updated.ReplicationConfig.Factor { + nc := nodeCounter.NodeCount() + if int(updated.ReplicationConfig.Factor) > nc { + return fmt.Errorf("cannot scale to %d replicas, cluster has only %d nodes", + updated.ReplicationConfig.Factor, nc) + } + } + + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/config_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/config_test.go new file mode 100644 index 0000000000000000000000000000000000000000..176bf00a137e12abb739e685c3da35bb5e5e6de8 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/config_test.go @@ -0,0 +1,124 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/replica" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/replication" +) + +func Test_ValidateConfig(t *testing.T) { + tests := []struct { + name string + initialconfig *models.ReplicationConfig + resultConfig *models.ReplicationConfig + globalConfig replication.GlobalConfig + expectedErr error + }{ + { + name: "config not provided", + initialconfig: nil, + resultConfig: &models.ReplicationConfig{Factor: 1}, + globalConfig: replication.GlobalConfig{MinimumFactor: 1}, + }, + { + name: "config not provided - global minimum is 2", + initialconfig: nil, + resultConfig: &models.ReplicationConfig{Factor: 2}, + globalConfig: replication.GlobalConfig{MinimumFactor: 2}, + }, + { + name: "config provided, factor not provided", + initialconfig: &models.ReplicationConfig{}, + resultConfig: &models.ReplicationConfig{Factor: 1}, + globalConfig: replication.GlobalConfig{MinimumFactor: 1}, + }, + { + name: "config provided, factor < 0", + initialconfig: &models.ReplicationConfig{Factor: -1}, + resultConfig: &models.ReplicationConfig{Factor: 1}, + globalConfig: replication.GlobalConfig{MinimumFactor: 1}, + }, + { + name: "config provided, valid factor", + initialconfig: &models.ReplicationConfig{Factor: 7}, + resultConfig: &models.ReplicationConfig{Factor: 7}, + }, + { + name: "explicitly trying to bypass the minimum leads to error", + initialconfig: &models.ReplicationConfig{Factor: 1}, + resultConfig: &models.ReplicationConfig{Factor: 1}, + globalConfig: replication.GlobalConfig{MinimumFactor: 2}, + expectedErr: fmt.Errorf("invalid replication factor: setup requires a minimum replication factor of 2: got 1"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + class := &models.Class{ + 
ReplicationConfig: test.initialconfig, + } + err := replica.ValidateConfig(class, test.globalConfig) + if test.expectedErr != nil { + assert.EqualError(t, test.expectedErr, err.Error()) + } else { + assert.Nil(t, err) + assert.EqualValues(t, test.resultConfig, class.ReplicationConfig) + } + }) + } +} + +func Test_ValidateConfigUpdate(t *testing.T) { + tests := []struct { + name string + initial *models.ReplicationConfig + update *models.ReplicationConfig + expectedError error + }{ + { + name: "attempting to increase replicas beyond cluster size", + initial: &models.ReplicationConfig{Factor: 3}, + update: &models.ReplicationConfig{Factor: 4}, + expectedError: fmt.Errorf( + "cannot scale to 4 replicas, cluster has only 3 nodes"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := replica.ValidateConfigUpdate( + &models.Class{ReplicationConfig: test.initial}, + &models.Class{ReplicationConfig: test.update}, + &fakeNodeCounter{3}) + if test.expectedError == nil { + assert.Nil(t, err) + } else { + require.NotNil(t, err, "update validation must error") + assert.Equal(t, test.expectedError.Error(), err.Error()) + } + }) + } +} + +type fakeNodeCounter struct{ count int } + +func (f *fakeNodeCounter) NodeCount() int { + return f.count +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/coordinator.go b/platform/dbops/binaries/weaviate-src/usecases/replica/coordinator.go new file mode 100644 index 0000000000000000000000000000000000000000..2ffaa504ca866e130691692e3d16393b16758e5d --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/coordinator.go @@ -0,0 +1,317 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/cluster/utils" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/sirupsen/logrus" +) + +const ( + defaultPullBackOffInitialInterval = time.Millisecond * 250 + defaultPullBackOffMaxElapsedTime = time.Second * 128 +) + +type ( + // readyOp asks a replica if it is ready to commit + readyOp func(_ context.Context, host, requestID string) error + + // readyOp asks a replica to execute the actual operation + commitOp[T any] func(_ context.Context, host, requestID string) (T, error) + + // readOp defines a generic read operation + readOp[T any] func(_ context.Context, host string, fullRead bool) (T, error) + + // coordinator coordinates replication of write and read requests + coordinator[T any] struct { + Client + Router router + log logrus.FieldLogger + Class string + Shard string + TxID string // transaction ID + // wait twice this duration for the first Pull backoff for each host + pullBackOffPreInitialInterval time.Duration + pullBackOffMaxElapsedTime time.Duration // stop retrying after this long + deletionStrategy string + } +) + +// newCoordinator used by the replicator +func newCoordinator[T any](r *Replicator, shard, requestID string, l logrus.FieldLogger, +) *coordinator[T] { + return &coordinator[T]{ + Client: r.client, + Router: r.router, + log: l, + Class: r.class, + Shard: shard, + TxID: requestID, + pullBackOffPreInitialInterval: defaultPullBackOffInitialInterval / 2, + pullBackOffMaxElapsedTime: defaultPullBackOffMaxElapsedTime, + } +} + +// newCoordinator used by the Finder to read objects from replicas +func newReadCoordinator[T any](f *Finder, shard string, + pullBackOffInitivalInterval time.Duration, + pullBackOffMaxElapsedTime time.Duration, + deletionStrategy string, +) *coordinator[T] { + return 
&coordinator[T]{ + Router: f.router, + Class: f.class, + Shard: shard, + pullBackOffPreInitialInterval: pullBackOffInitivalInterval / 2, + pullBackOffMaxElapsedTime: pullBackOffMaxElapsedTime, + deletionStrategy: deletionStrategy, + } +} + +// broadcast sends write request to all replicas (first phase of a two-phase commit) +func (c *coordinator[T]) broadcast(ctx context.Context, + replicas []string, + op readyOp, level int, +) <-chan string { + // prepare tells replicas to be ready + prepare := func() <-chan _Result[string] { + resChan := make(chan _Result[string], len(replicas)) + f := func() { // broadcast + defer close(resChan) + var wg sync.WaitGroup + wg.Add(len(replicas)) + for _, replica := range replicas { + replica := replica + g := func() { + defer wg.Done() + err := op(ctx, replica, c.TxID) + resChan <- _Result[string]{replica, err} + } + enterrors.GoWrapper(g, c.log) + } + wg.Wait() + } + enterrors.GoWrapper(f, c.log) + return resChan + } + + // handle responses to prepare requests + replicaCh := make(chan string, len(replicas)) + f := func() { + defer close(replicaCh) + actives := make([]string, 0, level) // cache for active replicas + for r := range prepare() { + if r.Err != nil { // connection error + c.log.WithField("op", "broadcast").Error(r.Err) + continue + } + + level-- + if level > 0 { // cache since level has not been reached yet + actives = append(actives, r.Value) + continue + } + if level == 0 { // consistency level has been reached + for _, x := range actives { + replicaCh <- x + } + } + replicaCh <- r.Value + } + if level > 0 { // abort: nothing has been sent to the caller + fs := logrus.Fields{"op": "broadcast", "active": len(actives), "total": len(replicas)} + c.log.WithFields(fs).Error("abort") + for _, node := range replicas { + c.Abort(ctx, node, c.Class, c.Shard, c.TxID) + } + } + } + enterrors.GoWrapper(f, c.log) + return replicaCh +} + +// commitAll tells replicas to commit pending updates related to a specific request +// 
(second phase of a two-phase commit) +func (c *coordinator[T]) commitAll(ctx context.Context, + replicaCh <-chan string, + op commitOp[T], +) <-chan _Result[T] { + replyCh := make(chan _Result[T], cap(replicaCh)) + f := func() { // tells active replicas to commit + wg := sync.WaitGroup{} + for replica := range replicaCh { + wg.Add(1) + replica := replica + g := func() { + defer wg.Done() + resp, err := op(ctx, replica, c.TxID) + replyCh <- _Result[T]{resp, err} + } + enterrors.GoWrapper(g, c.log) + } + wg.Wait() + close(replyCh) + } + enterrors.GoWrapper(f, c.log) + + return replyCh +} + +// Push pushes updates to all replicas of a specific shard +func (c *coordinator[T]) Push(ctx context.Context, + cl types.ConsistencyLevel, + ask readyOp, + com commitOp[T], +) (<-chan _Result[T], int, error) { + options := c.Router.BuildRoutingPlanOptions(c.Shard, c.Shard, cl, "") + writeRoutingPlan, err := c.Router.BuildWriteRoutingPlan(options) + if err != nil { + return nil, 0, fmt.Errorf("%w : class %q shard %q", err, c.Class, c.Shard) + } + level := writeRoutingPlan.IntConsistencyLevel + //nolint:govet // we expressely don't want to cancel that context as the timeout will take care of it + ctxWithTimeout, _ := context.WithTimeout(context.Background(), 20*time.Second) + c.log.WithFields(logrus.Fields{ + "action": "coordinator_push", + "duration": 20 * time.Second, + "level": level, + }).Debug("context.WithTimeout") + nodeCh := c.broadcast(ctxWithTimeout, writeRoutingPlan.HostAddresses(), ask, level) + commitCh := c.commitAll(context.Background(), nodeCh, com) + + // if there are additional hosts, we do a "best effort" write to them + // where we don't wait for a response because they are not part of the + // replicas used to reach level consistency + if len(writeRoutingPlan.AdditionalHostAddresses()) > 0 { + additionalHostsBroadcast := c.broadcast(ctxWithTimeout, writeRoutingPlan.AdditionalHostAddresses(), ask, len(writeRoutingPlan.AdditionalHostAddresses())) + 
c.commitAll(context.Background(), additionalHostsBroadcast, com) + } + return commitCh, level, nil +} + +// Pull data from replica depending on consistency level, trying to reach level successful calls +// to op, while cycling through replicas for the coordinator's shard. +// +// Some invariants of this method (some callers depend on these): +// - Try the first fullread op on the directCandidate (if directCandidate is non-empty) +// - Only one successful fullread op will be performed +// - Query level replicas concurrently, and avoid querying more than level unless there are failures +// - Only send up to level messages onto replyCh +// - Only send error messages on replyCh once it's unlikely we'll ever reach level successes +// +// Note that the first retry for a given host, may happen before c.pullBackOff.initial has passed +func (c *coordinator[T]) Pull(ctx context.Context, + cl types.ConsistencyLevel, + op readOp[T], directCandidate string, + timeout time.Duration, +) (<-chan _Result[T], int, error) { + options := c.Router.BuildRoutingPlanOptions(c.Shard, c.Shard, cl, directCandidate) + readRoutingPlan, err := c.Router.BuildReadRoutingPlan(options) + if err != nil { + return nil, 0, fmt.Errorf("%w : class %q shard %q", err, c.Class, c.Shard) + } + level := readRoutingPlan.IntConsistencyLevel + hosts := readRoutingPlan.HostAddresses() + replyCh := make(chan _Result[T], level) + f := func() { + hostRetryQueue := make(chan hostRetry, len(hosts)) + + // put the "backups/fallbacks" on the retry queue + for i := level; i < len(hosts); i++ { + hostRetryQueue <- hostRetry{ + hosts[i], + backoff.WithContext(utils.NewExponentialBackoff(c.pullBackOffPreInitialInterval, c.pullBackOffMaxElapsedTime), ctx), + } + } + + // kick off only level workers so that we avoid querying nodes unnecessarily + wg := sync.WaitGroup{} + wg.Add(level) + for i := 0; i < level; i++ { + hostIndex := i + isFullReadWorker := hostIndex == 0 // first worker will perform the fullRead + workerFunc := 
func() { + defer wg.Done() + workerCtx, workerCancel := context.WithTimeout(ctx, timeout) + defer workerCancel() + // each worker will first try its corresponding host (eg worker0 tries hosts[0], + // worker1 tries hosts[1], etc). We want the fullRead to be tried on hosts[0] + // because that will be the direct candidate (if a direct candidate was provided), + // if we only used the retry queue then we would not have the guarantee that the + // fullRead will be tried on hosts[0] first. + resp, err := op(workerCtx, hosts[hostIndex], isFullReadWorker) + // TODO return retryable info here, for now should be fine since most errors are considered retryable + // TODO have increasing timeout passed into each op (eg 1s, 2s, 4s, 8s, 16s, 32s, with some max) similar to backoff? future PR? or should we just set timeout once per worker in Pull? + if err == nil { + replyCh <- _Result[T]{resp, err} + return + } + // this host failed op on the first try, put it on the retry queue + hostRetryQueue <- hostRetry{ + hosts[hostIndex], + backoff.WithContext(utils.NewExponentialBackoff(c.pullBackOffPreInitialInterval, c.pullBackOffMaxElapsedTime), ctx), + } + + // let's fallback to the backups in the retry queue + for hr := range hostRetryQueue { + resp, err := op(workerCtx, hr.host, isFullReadWorker) + if err == nil { + replyCh <- _Result[T]{resp, err} + return + } + nextBackOff := hr.currentBackOff.NextBackOff() + if nextBackOff == backoff.Stop { + // this host has run out of retries, send the result and note that + // we have the worker exit here with the assumption that once we've reached + // this many failures for this host, we've tried all other hosts enough + // that we're not going to reach level successes + replyCh <- _Result[T]{resp, err} + return + } + + timer := time.NewTimer(nextBackOff) + select { + case <-workerCtx.Done(): + timer.Stop() + replyCh <- _Result[T]{resp, err} + return + case <-timer.C: + hostRetryQueue <- hostRetry{hr.host, hr.currentBackOff} + } + 
timer.Stop() + } + } + enterrors.GoWrapper(workerFunc, c.log) + } + wg.Wait() + // callers of this function rely on replyCh being closed + close(replyCh) + } + enterrors.GoWrapper(f, c.log) + + return replyCh, level, nil +} + +// hostRetry tracks how long we should wait to retry this host again +type hostRetry struct { + host string + currentBackOff backoff.BackOff +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/finder.go b/platform/dbops/binaries/weaviate-src/usecases/replica/finder.go new file mode 100644 index 0000000000000000000000000000000000000000..b8db1f855c9ff95c4f7f7e5fd0b16ce6a9141c31 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/finder.go @@ -0,0 +1,450 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "context" + "errors" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/errorcompounder" + enterrors "github.com/weaviate/weaviate/entities/errors" + "github.com/weaviate/weaviate/entities/filters" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" + "github.com/weaviate/weaviate/usecases/replica/hashtree" +) + +var ( + // MsgCLevel consistency level cannot be achieved + MsgCLevel = "cannot achieve consistency level" + + ErrReplicas = errors.New("cannot reach enough replicas") + ErrRepair = errors.New("read repair error") + ErrRead = errors.New("read error") + + ErrNoDiffFound = errors.New("no diff found") +) + +type ( + // senderReply is a 
container for the data received from a replica + senderReply[T any] struct { + sender string // hostname of the sender + Version int64 // sender's current version of the object + Data T // the data sent by the sender + UpdateTime int64 // sender's current update time + DigestRead bool + } + findOneReply senderReply[Replica] + existReply struct { + Sender string + types.RepairResponse + } +) + +// Finder finds replicated objects +type Finder struct { + router router + nodeName string + finderStream // stream of objects + // control the op backoffs in the coordinator's Pull + coordinatorPullBackoffInitialInterval time.Duration + coordinatorPullBackoffMaxElapsedTime time.Duration +} + +// NewFinder constructs a new finder instance +func NewFinder(className string, + router router, + nodeName string, + client RClient, + l logrus.FieldLogger, + coordinatorPullBackoffInitialInterval time.Duration, + coordinatorPullBackoffMaxElapsedTime time.Duration, + getDeletionStrategy func() string, +) *Finder { + cl := FinderClient{client} + return &Finder{ + router: router, + nodeName: nodeName, + finderStream: finderStream{ + repairer: repairer{ + class: className, + getDeletionStrategy: getDeletionStrategy, + client: cl, + logger: l, + }, + log: l, + }, + coordinatorPullBackoffInitialInterval: coordinatorPullBackoffInitialInterval, + coordinatorPullBackoffMaxElapsedTime: coordinatorPullBackoffMaxElapsedTime, + } +} + +// GetOne gets object which satisfies the giving consistency +func (f *Finder) GetOne(ctx context.Context, + l types.ConsistencyLevel, shard string, + id strfmt.UUID, + props search.SelectProperties, + adds additional.Properties, +) (*storobj.Object, error) { + c := newReadCoordinator[findOneReply](f, shard, + f.coordinatorPullBackoffInitialInterval, f.coordinatorPullBackoffMaxElapsedTime, f.getDeletionStrategy()) + op := func(ctx context.Context, host string, fullRead bool) (findOneReply, error) { + if fullRead { + r, err := f.client.FullRead(ctx, host, f.class, 
shard, id, props, adds, 0)
+
+			return findOneReply{host, 0, r, r.UpdateTime(), false}, err
+		} else {
+			xs, err := f.client.DigestReads(ctx, host, f.class, shard, []strfmt.UUID{id}, 0)
+
+			var x types.RepairResponse
+
+			if len(xs) == 1 {
+				x = xs[0]
+			}
+
+			r := Replica{
+				ID:                      id,
+				Deleted:                 x.Deleted,
+				LastUpdateTimeUnixMilli: x.UpdateTime,
+			}
+
+			return findOneReply{host, x.Version, r, x.UpdateTime, true}, err
+		}
+	}
+	replyCh, level, err := c.Pull(ctx, l, op, "", 20*time.Second)
+	if err != nil {
+		f.log.WithField("op", "pull.one").Error(err)
+		return nil, fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas)
+	}
+	result := <-f.readOne(ctx, shard, id, replyCh, level)
+	if err = result.Err; err != nil {
+		err = fmt.Errorf("%s %q: %w", MsgCLevel, l, err)
+		if strings.Contains(err.Error(), ErrConflictExistOrDeleted.Error()) {
+			err = objects.NewErrDirtyReadOfDeletedObject(err)
+		}
+	}
+	return result.Value, err
+}
+
+// FindUUIDs returns the deduplicated set of object UUIDs in the given shard
+// that match the filter, as reported by the replicas queried at consistency
+// level l. Replies that error are logged at debug level and skipped.
+//
+// NOTE: the className parameter is currently unused; f.class is what is sent
+// to the replicas.
+func (f *Finder) FindUUIDs(ctx context.Context,
+	className, shard string, filters *filters.LocalFilter, l types.ConsistencyLevel,
+) (uuids []strfmt.UUID, err error) {
+	c := newReadCoordinator[[]strfmt.UUID](f, shard,
+		f.coordinatorPullBackoffInitialInterval, f.coordinatorPullBackoffMaxElapsedTime, f.getDeletionStrategy())
+
+	op := func(ctx context.Context, host string, _ bool) ([]strfmt.UUID, error) {
+		return f.client.FindUUIDs(ctx, host, f.class, shard, filters)
+	}
+
+	replyCh, _, err := c.Pull(ctx, l, op, "", 30*time.Second)
+	if err != nil {
+		f.log.WithField("op", "pull.uuids").Error(err)
+		return nil, fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas)
+	}
+
+	// union the UUIDs reported by all replicas, dropping duplicates
+	res := make(map[strfmt.UUID]struct{})
+
+	for r := range replyCh {
+		if r.Err != nil {
+			f.logger.WithField("op", "finder.find_uuids").WithError(r.Err).Debug("error in reply channel")
+			continue
+		}
+
+		for _, uuid := range r.Value {
+			res[uuid] = struct{}{}
+		}
+	}
+
+	uuids = make([]strfmt.UUID, 0, len(res))
+
+	for uuid := range res {
+		uuids = append(uuids, uuid)
+	}
+
+	return uuids, err
+}
+
+// ShardDesc identifies a physical shard by its name and owning node.
+type ShardDesc struct {
+	Name string
+	Node string
+}
+
+// CheckConsistency for objects belonging to different physical shards.
+//
+// For each x in xs the fields BelongsToNode and BelongsToShard must be set non empty
+func (f *Finder) CheckConsistency(ctx context.Context,
+	l types.ConsistencyLevel, xs []*storobj.Object,
+) error {
+	if len(xs) == 0 {
+		return nil
+	}
+	for i, x := range xs { // check shard and node name are set
+		if x == nil {
+			return fmt.Errorf("contains nil at object at index %d", i)
+		}
+		if x.BelongsToNode == "" || x.BelongsToShard == "" {
+			return fmt.Errorf("missing node or shard at index %d", i)
+		}
+	}
+
+	if l == types.ConsistencyLevelOne { // already consistent
+		for i := range xs {
+			xs[i].IsConsistent = true
+		}
+		return nil
+	}
+	// check shard consistency concurrently
+	gr, ctx := enterrors.NewErrorGroupWithContextWrapper(f.logger, ctx)
+	for _, part := range cluster(createBatch(xs)) {
+		part := part
+		gr.Go(func() error {
+			_, err := f.checkShardConsistency(ctx, l, part)
+			if err != nil {
+				f.log.WithField("op", "check_shard_consistency").
+ WithField("shard", part.Shard).Error(err) + } + return err + }, part) + } + return gr.Wait() +} + +// Exists checks if an object exists which satisfies the giving consistency +func (f *Finder) Exists(ctx context.Context, + l types.ConsistencyLevel, + shard string, + id strfmt.UUID, +) (bool, error) { + c := newReadCoordinator[existReply](f, shard, + f.coordinatorPullBackoffInitialInterval, f.coordinatorPullBackoffMaxElapsedTime, f.getDeletionStrategy()) + op := func(ctx context.Context, host string, _ bool) (existReply, error) { + xs, err := f.client.DigestReads(ctx, host, f.class, shard, []strfmt.UUID{id}, 0) + var x types.RepairResponse + if len(xs) == 1 { + x = xs[0] + } + return existReply{host, x}, err + } + replyCh, state, err := c.Pull(ctx, l, op, "", 20*time.Second) + if err != nil { + f.log.WithField("op", "pull.exist").Error(err) + return false, fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + } + result := <-f.readExistence(ctx, shard, id, replyCh, state) + if err = result.Err; err != nil { + err = fmt.Errorf("%s %q: %w", MsgCLevel, l, err) + if strings.Contains(err.Error(), ErrConflictExistOrDeleted.Error()) { + err = objects.NewErrDirtyReadOfDeletedObject(err) + } + } + return result.Value, err +} + +// NodeObject gets object from a specific node. 
+// it is used mainly for debugging purposes
+func (f *Finder) NodeObject(ctx context.Context,
+	nodeName,
+	shard string,
+	id strfmt.UUID,
+	props search.SelectProperties, adds additional.Properties,
+) (*storobj.Object, error) {
+	host, ok := f.router.NodeHostname(nodeName)
+	if !ok || host == "" {
+		return nil, fmt.Errorf("cannot resolve node name: %s", nodeName)
+	}
+	// NOTE(review): the trailing argument is 9 here while the other read
+	// paths in this file pass 0 — presumably a retry/attempt count suited to
+	// this debug-only path; confirm against RClient.FullRead.
+	r, err := f.client.FullRead(ctx, host, f.class, shard, id, props, adds, 9)
+	return r.Object, err
+}
+
+// checkShardConsistency checks consistency for a set of objects belonging to a shard
+// It returns the most recent objects or an error
+func (f *Finder) checkShardConsistency(ctx context.Context,
+	l types.ConsistencyLevel,
+	batch ShardPart,
+) ([]*storobj.Object, error) {
+	var (
+		c = newReadCoordinator[BatchReply](f, batch.Shard,
+			f.coordinatorPullBackoffInitialInterval, f.coordinatorPullBackoffMaxElapsedTime, f.getDeletionStrategy())
+		shard     = batch.Shard
+		data, ids = batch.Extract() // extract from current content
+	)
+	// the "full read" branch is answered from the content we already hold in
+	// memory; only digest reads go out to the other replicas
+	op := func(ctx context.Context, host string, fullRead bool) (BatchReply, error) {
+		if fullRead { // we already have the content
+			return BatchReply{Sender: host, IsDigest: false, FullData: data}, nil
+		} else {
+			xs, err := f.client.DigestReads(ctx, host, f.class, shard, ids, 0)
+			return BatchReply{Sender: host, IsDigest: true, DigestData: xs}, err
+		}
+	}
+
+	replyCh, state, err := c.Pull(ctx, l, op, batch.Node, 20*time.Second)
+	if err != nil {
+		return nil, fmt.Errorf("pull shard: %w", ErrReplicas)
+	}
+	result := <-f.readBatchPart(ctx, batch, ids, replyCh, state)
+	return result.Value, result.Err
+}
+
+// ShardDifferenceReader describes where a shard diverges from one target node.
+type ShardDifferenceReader struct {
+	TargetNodeName    string
+	TargetNodeAddress string
+	RangeReader       hashtree.AggregatedHashTreeRangeReader
+}
+
+// CollectShardDifferences collects the differences between the local node and the target nodes.
+// It returns a ShardDifferenceReader that contains the differences and the target node name/address.
+// If no differences are found, it returns ErrNoDiffFound.
+// When ErrNoDiffFound is returned as the error, the returned *ShardDifferenceReader may exist
+// and have some (but not all) of its fields set.
+func (f *Finder) CollectShardDifferences(ctx context.Context,
+	shardName string, ht hashtree.AggregatedHashTree, diffTimeoutPerNode time.Duration,
+	targetNodeOverrides []additional.AsyncReplicationTargetNodeOverride,
+) (diffReader *ShardDifferenceReader, err error) {
+	options := f.router.BuildRoutingPlanOptions(shardName, shardName, types.ConsistencyLevelOne, "")
+	routingPlan, err := f.router.BuildReadRoutingPlan(options)
+	if err != nil {
+		return nil, fmt.Errorf("%w : class %q shard %q", err, f.class, shardName)
+	}
+
+	// collectDiffForTargetNode walks the hash tree level by level, comparing
+	// local digests against the target node's digests for the positions still
+	// marked in the `diff` bitset, under a per-node timeout.
+	collectDiffForTargetNode := func(targetNodeAddress, targetNodeName string) (*ShardDifferenceReader, error) {
+		ctx, cancel := context.WithTimeout(ctx, diffTimeoutPerNode)
+		defer cancel()
+
+		diff := hashtree.NewBitset(hashtree.NodesCount(ht.Height()))
+
+		digests := make([]hashtree.Digest, hashtree.LeavesCount(ht.Height()))
+
+		diff.Set(0) // init comparison at root level
+
+		for l := 0; l <= ht.Height(); l++ {
+			// local digests for the currently-marked positions
+			_, err := ht.Level(l, diff, digests)
+			if err != nil {
+				return nil, fmt.Errorf("%q: %w", targetNodeAddress, err)
+			}
+
+			// remote digests for the same positions
+			levelDigests, err := f.client.HashTreeLevel(ctx, targetNodeAddress, f.class, shardName, l, diff)
+			if err != nil {
+				return nil, fmt.Errorf("%q: %w", targetNodeAddress, err)
+			}
+			if len(levelDigests) == 0 {
+				// no differences were found
+				break
+			}
+
+			// NOTE(review): hashtree.LevelDiff appears to update `diff` in
+			// place to mark the divergent positions — confirm in hashtree pkg
+			levelDiffCount := hashtree.LevelDiff(l, diff, digests, levelDigests)
+			if levelDiffCount == 0 {
+				// no differences were found
+				break
+			}
+		}
+
+		if diff.SetCount() == 0 {
+			// trees match: report "no diff" but still identify the target node
+			return &ShardDifferenceReader{
+				TargetNodeName:    targetNodeName,
+				TargetNodeAddress: targetNodeAddress,
+			}, ErrNoDiffFound
+		}
+
+		return &ShardDifferenceReader{
+			TargetNodeName:    targetNodeName,
+			TargetNodeAddress: targetNodeAddress,
+			RangeReader:       ht.NewRangeReader(diff),
+		}, nil
} + + ec := errorcompounder.New() + + // If the caller provided a list of target node overrides, filter the replicas to only include + // the relevant overrides so that we only "push" updates to the specified nodes. + localNodeName := f.LocalNodeName() + targetNodesToUse := routingPlan.NodeNames() + if len(targetNodeOverrides) > 0 { + targetNodesToUse = make([]string, 0, len(targetNodeOverrides)) + for _, override := range targetNodeOverrides { + if override.SourceNode == localNodeName && override.CollectionID == f.class && override.ShardID == shardName { + targetNodesToUse = append(targetNodesToUse, override.TargetNode) + } + } + } + + replicaNodeNames := make([]string, 0, len(routingPlan.Replicas())) + replicasHostAddrs := make([]string, 0, len(routingPlan.HostAddresses())) + for _, replica := range targetNodesToUse { + replicaNodeNames = append(replicaNodeNames, replica) + replicaHostAddr, ok := f.router.NodeHostname(replica) + if ok { + replicasHostAddrs = append(replicasHostAddrs, replicaHostAddr) + } + } + + // shuffle the replicas to randomize the order in which we look for differences + if len(replicasHostAddrs) > 1 { + // Use the global rand package which is thread-safe + rand.Shuffle(len(replicasHostAddrs), func(i, j int) { + replicaNodeNames[i], replicaNodeNames[j] = replicaNodeNames[j], replicaNodeNames[i] + replicasHostAddrs[i], replicasHostAddrs[j] = replicasHostAddrs[j], replicasHostAddrs[i] + }) + } + + localHostAddr, _ := f.router.NodeHostname(localNodeName) + + for i, targetNodeAddress := range replicasHostAddrs { + targetNodeName := replicaNodeNames[i] + if targetNodeAddress == localHostAddr { + continue + } + + diffReader, err := collectDiffForTargetNode(targetNodeAddress, targetNodeName) + if err != nil { + if !errors.Is(err, ErrNoDiffFound) { + ec.Add(err) + } + continue + } + + return diffReader, nil + } + + err = ec.ToError() + if err != nil { + return nil, err + } + + return &ShardDifferenceReader{}, ErrNoDiffFound +} + +func (f *Finder) 
DigestObjectsInRange(ctx context.Context, + shardName string, host string, initialUUID, finalUUID strfmt.UUID, limit int, +) (ds []types.RepairResponse, err error) { + return f.client.DigestObjectsInRange(ctx, host, f.class, shardName, initialUUID, finalUUID, limit) +} + +// Overwrite specified object with most recent contents +func (f *Finder) Overwrite(ctx context.Context, + host, index, shard string, xs []*objects.VObject, +) ([]types.RepairResponse, error) { + return f.client.Overwrite(ctx, host, index, shard, xs) +} + +func (f *Finder) LocalNodeName() string { + return f.nodeName +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/finder_stream.go b/platform/dbops/binaries/weaviate-src/usecases/replica/finder_stream.go new file mode 100644 index 0000000000000000000000000000000000000000..d2dbb10f84972c42a0917ceb88eac0c2a6661d1f --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/finder_stream.go @@ -0,0 +1,323 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "context" + "fmt" + "strings" + + "github.com/pkg/errors" + "github.com/weaviate/weaviate/cluster/router/types" + enterrors "github.com/weaviate/weaviate/entities/errors" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/weaviate/weaviate/entities/storobj" +) + +// pullSteam is used by the finder to pull objects from replicas +type finderStream struct { + repairer + log logrus.FieldLogger +} + +type ( + // tuple is a container for the data received from a replica + tuple[T any] struct { + Sender string + UTime int64 + O T + ack int + err error + } + + ObjTuple tuple[Replica] + ObjResult = _Result[*storobj.Object] +) + +// readOne reads one replicated object +func (f *finderStream) readOne(ctx context.Context, + shard string, + id strfmt.UUID, + ch <-chan _Result[findOneReply], + level int, +) <-chan ObjResult { + // counters tracks the number of votes for each participant + resultCh := make(chan ObjResult, 1) + g := func() { + defer close(resultCh) + var ( + votes = make([]ObjTuple, 0, level) + contentIdx = -1 + ) + + for r := range ch { // len(ch) == level + resp := r.Value + if r.Err != nil { // a least one node is not responding + f.log.WithField("op", "get").WithField("replica", resp.sender). + WithField("class", f.class).WithField("shard", shard). 
+					WithField("uuid", id).Error(r.Err)
+				resultCh <- ObjResult{nil, ErrRead}
+				return
+			}
+			// remember the position of the single full (non-digest) read;
+			// its payload can be returned directly once its vote group wins
+			if !resp.DigestRead {
+				contentIdx = len(votes)
+			}
+			votes = append(votes, ObjTuple{resp.sender, resp.UpdateTime, resp.Data, 0, nil})
+
+			// each reply acks every recorded vote carrying the same update
+			// time; a vote group reaching `level` acks is the agreed state
+			for i := range votes {
+				if votes[i].UTime != resp.UpdateTime {
+					// incoming response does not match current Vote
+					continue
+				}
+
+				votes[i].ack++
+
+				if votes[i].ack < level {
+					// current Vote does not have enough acks
+					continue
+				}
+
+				// agreement on a tombstone: the object does not exist
+				if votes[i].O.Deleted {
+					resultCh <- ObjResult{nil, nil}
+					return
+				}
+				if i == contentIdx {
+					// prefetched payload matches agreed Vote
+					resultCh <- ObjResult{votes[contentIdx].O.Object, nil}
+					return
+				}
+			}
+		}
+
+		// channel drained without `level` acks on the full read's version:
+		// fall back to read repair to converge the replicas
+		obj, err := f.repairOne(ctx, shard, id, votes, contentIdx)
+		if err == nil {
+			resultCh <- ObjResult{obj, nil}
+			return
+		}
+
+		resultCh <- ObjResult{nil, errors.Wrap(err, ErrRepair.Error())}
+		// log each replica's update time as space-separated "host:utime" pairs
+		var sb strings.Builder
+		for i, c := range votes {
+			if i != 0 {
+				sb.WriteByte(' ')
+			}
+			fmt.Fprintf(&sb, "%s:%d", c.Sender, c.UTime)
+		}
+		f.log.WithField("op", "repair_one").WithField("class", f.class).
+			WithField("shard", shard).WithField("uuid", id).
+			WithField("msg", sb.String()).Error(err)
+	}
+	enterrors.GoWrapper(g, f.logger)
+	return resultCh
+}
+
+type (
+	batchResult _Result[[]*storobj.Object]
+
+	// Vote represents objects received from a specific replica and the number of votes per object.
+ Vote struct { + BatchReply // reply from a replica + Count []int // number of votes per object + Err error + } +) + +type BoolTuple tuple[types.RepairResponse] + +// readExistence checks if replicated object exists +func (f *finderStream) readExistence(ctx context.Context, + shard string, + id strfmt.UUID, + ch <-chan _Result[existReply], + level int, +) <-chan _Result[bool] { + resultCh := make(chan _Result[bool], 1) + g := func() { + defer close(resultCh) + votes := make([]BoolTuple, 0, level) // number of votes per replica + + for r := range ch { // len(ch) == st.Level + resp := r.Value + if r.Err != nil { // at least one node is not responding + f.log.WithField("op", "exists").WithField("replica", resp.Sender). + WithField("class", f.class).WithField("shard", shard). + WithField("uuid", id).Error(r.Err) + resultCh <- _Result[bool]{false, ErrRead} + return + } + + votes = append(votes, BoolTuple{resp.Sender, resp.UpdateTime, resp.RepairResponse, 0, nil}) + + for i := range votes { // count number of votes + if votes[i].UTime != resp.UpdateTime { + // incomming response does not match current Vote + continue + } + + votes[i].ack++ + + if votes[i].ack < level { + // current Vote does not have enough acks + continue + } + + exists := !votes[i].O.Deleted && votes[i].O.UpdateTime != 0 + resultCh <- _Result[bool]{exists, nil} + return + } + } + + obj, err := f.repairExist(ctx, shard, id, votes) + if err == nil { + resultCh <- _Result[bool]{obj, nil} + return + } + resultCh <- _Result[bool]{false, errors.Wrap(err, ErrRepair.Error())} + + var sb strings.Builder + for i, c := range votes { + if i != 0 { + sb.WriteByte(' ') + } + fmt.Fprintf(&sb, "%s:%d", c.Sender, c.UTime) + } + f.log.WithField("op", "repair_exist").WithField("class", f.class). + WithField("shard", shard).WithField("uuid", id). 
+			WithField("msg", sb.String()).Error(err)
+	}
+	enterrors.GoWrapper(g, f.logger)
+	return resultCh
+}
+
+// readBatchPart reads in replicated objects specified by their ids
+// It checks each object x for consistency and sets x.IsConsistent
+func (f *finderStream) readBatchPart(ctx context.Context,
+	batch ShardPart,
+	ids []strfmt.UUID,
+	ch <-chan _Result[BatchReply],
+	level int,
+) <-chan batchResult {
+	resultCh := make(chan batchResult, 1)
+
+	g := func() {
+		defer close(resultCh)
+		var (
+			N = len(ids) // number of requested objects
+			// votes counts number of votes per object for each node
+			votes      = make([]Vote, 0, level)
+			contentIdx = -1 // index of full read reply
+		)
+
+		for r := range ch { // len(ch) == level
+			resp := r.Value
+			if r.Err != nil { // at least one node is not responding
+				f.log.WithField("op", "read_batch.get").WithField("replica", r.Value.Sender).
+					WithField("class", f.class).WithField("shard", batch.Shard).Error(r.Err)
+				resultCh <- batchResult{nil, ErrRead}
+				return
+			}
+			if !resp.IsDigest {
+				contentIdx = len(votes)
+			}
+
+			votes = append(votes, Vote{resp, make([]int, N), nil})
+			// M counts objects already consistent: the most-acked update time
+			// for object i must have >= level votes AND belong to the full
+			// read reply (so its payload is available)
+			M := 0
+			for i := 0; i < N; i++ {
+				max := 0
+				maxAt := -1
+				lastTime := resp.UpdateTimeAt(i)
+
+				for j := range votes { // count votes
+					if votes[j].UpdateTimeAt(i) == lastTime {
+						votes[j].Count[i]++
+					}
+					if max < votes[j].Count[i] {
+						max = votes[j].Count[i]
+						maxAt = j
+					}
+				}
+				if max >= level && maxAt == contentIdx {
+					M++
+				}
+			}
+
+			if M == N { // all objects are consistent
+				for _, idx := range batch.Index {
+					batch.Data[idx].IsConsistent = true
+				}
+				resultCh <- batchResult{fromReplicas(votes[contentIdx].FullData), nil}
+				return
+			}
+		}
+		// not every object reached agreement: repair the divergent ones
+		res, err := f.repairBatchPart(ctx, batch.Shard, ids, votes, contentIdx)
+		if err != nil {
+			resultCh <- batchResult{nil, ErrRepair}
+			f.log.WithField("op", "repair_batch").WithField("class", f.class).
+				WithField("shard", batch.Shard).WithField("uuids", ids).Error(err)
+			return
+		}
+		// count total number of votes
+		// NOTE(review): len(votes)*len(votes) is used as the "fully voted"
+		// per-object threshold; presumably repairBatchPart tops up Count so a
+		// repaired object reaches this total — confirm against the repairer.
+		maxCount := len(votes) * len(votes)
+		// sum aliases votes[0].Count and accumulates per-object totals in place
+		sum := votes[0].Count
+		for _, vote := range votes[1:] {
+			for i, n := range vote.Count {
+				sum[i] += n
+			}
+		}
+		// set consistency flag
+		for i, n := range sum {
+			if n == maxCount { // if consistent
+				x := res[i]
+
+				if x == nil {
+					// object was fetched but deleted during repair phase
+					batch.Data[batch.Index[i]].IsConsistent = false
+					continue
+				}
+
+				// carry shard/node ownership over from the original entry
+				prev := batch.Data[batch.Index[i]]
+				x.BelongsToShard = prev.BelongsToShard
+				x.BelongsToNode = prev.BelongsToNode
+				batch.Data[batch.Index[i]] = x
+				x.IsConsistent = true
+			}
+		}
+
+		resultCh <- batchResult{res, nil}
+	}
+	enterrors.GoWrapper(g, f.logger)
+
+	return resultCh
+}
+
+// BatchReply is a container of the batch received from a replica
+// The returned data may result from a full or digest read request
+type BatchReply struct {
+	// Sender hostname of the Sender
+	Sender string
+	// IsDigest is this reply from a digest read?
+ IsDigest bool + // FullData returned from a full read request + FullData []Replica + // DigestData returned from a digest read request + DigestData []types.RepairResponse +} + +// UpdateTimeAt gets update time from reply +func (r BatchReply) UpdateTimeAt(idx int) int64 { + if len(r.DigestData) != 0 { + return r.DigestData[idx].UpdateTime + } + return r.FullData[idx].UpdateTime() +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/finder_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/finder_test.go new file mode 100644 index 0000000000000000000000000000000000000000..322d180537582933d4486408d69956f1307579a3 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/finder_test.go @@ -0,0 +1,987 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/replica" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/storobj" +) + +func object(id strfmt.UUID, lastTime int64) *storobj.Object { + return &storobj.Object{ + Object: models.Object{ + ID: id, + LastUpdateTimeUnix: lastTime, + }, + } +} + +func objectWithVectors(id strfmt.UUID, lastTime int64, vectors map[string][]float32) *storobj.Object { + vectors2 := make(models.Vectors, len(vectors)) + for k, vec := range vectors { + vectors2[k] = vec + } + return &storobj.Object{ + Object: models.Object{ + ID: id, + LastUpdateTimeUnix: lastTime, + Vectors: vectors2, + }, + Vectors: vectors, + } +} + +func repl(id strfmt.UUID, lastTime int64, deleted bool) replica.Replica { + x := replica.Replica{ + Deleted: deleted, + Object: &storobj.Object{ + Object: models.Object{ + ID: id, + LastUpdateTimeUnix: lastTime, + }, + }, + } + if !x.Deleted { + x.ID = id + } + return x +} + +func TestFinderCantReachEnoughReplicas(t *testing.T) { + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("CantReachEnoughReplicas_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + ctx = context.Background() + finder = f.newFinder("A") + ) + + finder.CheckConsistency(ctx, types.ConsistencyLevelAll, []*storobj.Object{objectEx("1", 1, "S", "N")}) + f.assertLogErrorContains(t, replica.ErrReplicas.Error()) + }) + } +} + +func TestFinderNodeObject(t *testing.T) { + var ( + id = strfmt.UUID("123") + 
cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + r = replica.Replica{ID: id, Object: object(id, 3)} + adds = additional.Properties{} + proj = search.SelectProperties{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Unresolved_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder := f.newFinder("A") + _, err := finder.NodeObject(ctx, "N", "S", "id", nil, additional.Properties{}) + assert.Contains(t, err.Error(), "N") + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder := f.newFinder("A") + for _, n := range nodes { + f.RClient.On("FetchObject", anyVal, n, cls, shard, id, proj, adds).Return(r, nil) + } + got, err := finder.NodeObject(ctx, nodes[0], shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, r.Object, got) + }) + } +} + +func TestFinderGetOneWithConsistencyLevelALL(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + nilObject *storobj.Object + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("AllButOne_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + 
f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + + assert.Equal(t, nilObject, got) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, item.Object, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + // obj = object(id, 3) + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 0}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(emptyItem, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, nilObject, got) + }) + + t.Run(fmt.Sprintf("ContextCancelledFastEnough_%v", tc.variant), func(t *testing.T) { + var ( + f = 
newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinderWithTimings("A", time.Millisecond*128, time.Second*10) + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ticker = time.NewTicker(time.Millisecond * 100) + ) + + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).WaitUntil(ticker.C).Return(item, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).WaitUntil(ticker.C).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).WaitUntil(ticker.C).Return(digestR, errAny) + + ctxTimeout, cancel := context.WithTimeout(ctx, time.Millisecond*500) + defer cancel() + before := time.Now() + got, err := finder.GetOne(ctxTimeout, types.ConsistencyLevelAll, shard, id, proj, adds) + if s := time.Since(before); s > time.Second { + assert.Failf(t, "GetOne took too long to return after context was cancelled", "took: %v", s) + } + assert.ErrorIs(t, err, replica.ErrRead) + assert.Equal(t, nilObject, got) + f.assertLogErrorContains(t, errAny.Error()) + }) + } + + // TODO investigate flakiness + // t.Run("Fetch02Digest1Fails", func(t *testing.T) { + // var ( + // f = newFakeFactory("C1", shard, nodes) + // finder = f.newFinder("A") + // digestIDs = []strfmt.UUID{id} + // item = replica.Replica{ID: id, Object: object(id, 3)} + // digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + // ) + // f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(emptyItem, errAny) + // f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item, nil) + // f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(emptyItem, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + // f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, 
digestIDs).Return(digestR, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + // got, err := finder.GetOne(ctx, Quorum, shard, id, proj, adds) + // assert.Nil(t, err) + // assert.Equal(t, item.Object, got) + // }) +} + +func TestFinderGetOneWithConsistencyLevelQuorum(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + nilObject *storobj.Object + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("AllButOne_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, nilObject, got) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, 
adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, item.Object, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + // obj = object(id, 3) + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 0}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(emptyItem, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, nilObject, got) + }) + + // succeeds via Fetch0+Digest1 + t.Run(fmt.Sprintf("Digest02Fail_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, item.Object, got) + 
}) + + // fails because only Node0 succeeds + t.Run(fmt.Sprintf("Fetch12Digest12Fail_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item, errAny) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, nilObject, got) + }) + + // fails because only Node1 succeeds + t.Run(fmt.Sprintf("Fetch02Digest02Fail_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, errAny) + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, 
shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelQuorum, shard, id, proj, adds) + + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, nilObject, got) + }) + } + + // TODO investigate flakiness + // succeeds via Fetch2+Digest1 + // t.Run("Fetch01Digest02Fail", func(t *testing.T) { + // var ( + // f = newFakeFactory("C1", shard, nodes) + // finder = f.newFinder("A") + // digestIDs = []strfmt.UUID{id} + // item = replica.Replica{ID: id, Object: object(id, 3)} + // digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + // ) + // f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, errAny) + // f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item, errAny) + // f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, nil) + // f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + // f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + // got, err := finder.GetOne(ctx, Quorum, shard, id, proj, adds) + // assert.Nil(t, err) + // assert.Equal(t, item.Object, got) + // }) + + // investigate flakiness + // succeeds via Fetch1+Digest2 + // t.Run("Fetch02Digest01Fail", func(t *testing.T) { + // var ( + // f = newFakeFactory("C1", shard, nodes) + // finder = f.newFinder("A") + // digestIDs = []strfmt.UUID{id} + // item = replica.Replica{ID: id, Object: object(id, 3)} + // digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + // ) + // f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(emptyItem, errAny) + // f.RClient.On("FetchObject", anyVal, 
nodes[1], cls, shard, id, proj, adds).Return(item, nil) + // f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(emptyItem, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, errAny) + // f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + // got, err := finder.GetOne(ctx, Quorum, shard, id, proj, adds) + // assert.Nil(t, err) + // assert.Equal(t, item.Object, got) + // }) +} + +func TestFinderGetOneWithConsistencyLevelOne(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + nilObject *storobj.Object + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("None_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + // obj = replica.Replica{ID: id, Object: object(id, 3) + ) + for _, n := range nodes { + f.RClient.On("FetchObject", anyVal, n, cls, shard, id, proj, adds).Return(emptyItem, errAny) + } + + got, err := finder.GetOne(ctx, types.ConsistencyLevelOne, shard, id, proj, adds) + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, nilObject, got) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder(nodes[2]) + item = replica.Replica{ID: id, Object: object(id, 3)} + ) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, nil) + got, err := 
finder.GetOne(ctx, types.ConsistencyLevelOne, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, item.Object, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(emptyItem, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelOne, shard, id, proj, adds) + assert.Nil(t, err) + assert.Equal(t, nilObject, got) + }) + } +} + +func TestFinderExistsWithConsistencyLevelALL(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + nilReply = []types.RepairResponse(nil) + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("None_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(nilReply, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, false, got) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + 
f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + assert.Nil(t, err) + assert.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 0, Deleted: true}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + assert.Nil(t, err) + assert.Equal(t, false, got) + }) + } +} + +func TestFinderExistsWithConsistencyLevelQuorum(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + nilReply = []types.RepairResponse(nil) + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("AllButOne_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(nilReply, errAny) + 
f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + assert.ErrorIs(t, err, replica.ErrRead) + f.assertLogErrorContains(t, errAny.Error()) + assert.Equal(t, false, got) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + assert.Nil(t, err) + assert.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 0, Deleted: true}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + assert.Nil(t, err) + assert.Equal(t, false, got) + }) + } +} + +func TestFinderExistsWithConsistencyLevelOne(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", 
false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelOne, shard, id) + assert.Nil(t, err) + assert.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("NotFound_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR = []types.RepairResponse{{ID: id.String(), UpdateTime: 0, Deleted: true}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelOne, shard, id) + assert.Nil(t, err) + assert.Equal(t, false, got) + }) + } +} + +func TestFinderCheckConsistencyALL(t *testing.T) { + var ( + ids = []strfmt.UUID{"0", "1", "2", "3", "4", "5"} + cls = "C1" + shards = []string{"S1", "S2", "S3"} + nodes = []string{"A", "B", "C"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("ExceptOne_%v", tc.variant), func(t *testing.T) { + var ( + shard = shards[0] + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs, digestR = genInputs("A", shard, 1, ids) + ) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR, errAny) + + err := 
finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + want := setObjectsConsistency(xs, false) + assert.ErrorIs(t, err, replica.ErrRead) + assert.ElementsMatch(t, want, xs) + f.assertLogErrorContains(t, replica.ErrRead.Error()) + }) + + t.Run(fmt.Sprintf("OneShard_%v", tc.variant), func(t *testing.T) { + var ( + shard = shards[0] + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs, digestR = genInputs("A", shard, 2, ids) + ) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR, nil) + + want := setObjectsConsistency(xs, true) + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + assert.Nil(t, err) + assert.ElementsMatch(t, want, xs) + }) + + t.Run(fmt.Sprintf("TwoShards_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shards[0], nodes, tc.isMultiTenant) + finder = f.newFinder("A") + idSet1 = ids[:3] + idSet2 = ids[3:6] + xs1, digestR1 = genInputs("A", shards[0], 1, idSet1) + xs2, digestR2 = genInputs("B", shards[1], 2, idSet2) + ) + xs := make([]*storobj.Object, 0, len(xs1)+len(xs2)) + for i := 0; i < 3; i++ { + xs = append(xs, xs1[i]) + xs = append(xs, xs2[i]) + } + // first shard + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shards[0], idSet1).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[0], idSet1).Return(digestR1, nil) + + // second shard + f.AddShard(shards[1], nodes) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shards[1], idSet2).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[1], idSet2).Return(digestR2, nil) + + want := setObjectsConsistency(xs, true) + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + assert.Nil(t, err) + assert.ElementsMatch(t, want, xs) + }) + + t.Run(fmt.Sprintf("ThreeShard_%v", tc.variant), func(t *testing.T) { + 
var ( + f = newFakeFactory(t, "C1", shards[0], nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids1 = ids[:2] + ids2 = ids[2:4] + ids3 = ids[4:] + xs1, digestR1 = genInputs("A", shards[0], 1, ids1) + xs2, digestR2 = genInputs("B", shards[1], 2, ids2) + xs3, digestR3 = genInputs("C", shards[2], 3, ids3) + ) + xs := make([]*storobj.Object, 0, len(xs1)+len(xs2)) + for i := 0; i < 2; i++ { + xs = append(xs, xs1[i]) + xs = append(xs, xs2[i]) + xs = append(xs, xs3[i]) + } + // first shard + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shards[0], ids1).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[0], ids1).Return(digestR1, nil) + + // second shard + f.AddShard(shards[1], nodes) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shards[1], ids2).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[1], ids2).Return(digestR2, nil) + + // third shard + f.AddShard(shards[2], nodes) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shards[2], ids3).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shards[2], ids3).Return(digestR3, nil) + + want := setObjectsConsistency(xs, true) + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + assert.Nil(t, err) + assert.ElementsMatch(t, want, xs) + }) + + t.Run(fmt.Sprintf("TwoShardSingleNode_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shards[0], nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids1 = ids[:2] + ids2 = ids[2:4] + ids3 = ids[4:] + xs1, digestR1 = genInputs("A", shards[0], 1, ids1) + xs2, digestR2 = genInputs("B", shards[1], 1, ids2) + xs3, digestR3 = genInputs("A", shards[2], 2, ids3) + ) + xs := make([]*storobj.Object, 0, len(xs1)+len(xs2)) + for i := 0; i < 2; i++ { + xs = append(xs, xs1[i]) + xs = append(xs, xs2[i]) + xs = append(xs, xs3[i]) + } + // first shard + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shards[0], 
ids1).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[0], ids1).Return(digestR1, nil) + + // second shard + f.AddShard(shards[1], nodes) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shards[1], ids2).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[1], ids2).Return(digestR2, nil) + + // third shard + f.AddShard(shards[2], nodes) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shards[2], ids3).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shards[2], ids3).Return(digestR3, nil) + + want := setObjectsConsistency(xs, true) + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + assert.Nil(t, err) + assert.ElementsMatch(t, want, xs) + }) + } +} + +func TestFinderCheckConsistencyQuorum(t *testing.T) { + var ( + ids = []strfmt.UUID{"10", "20", "30"} + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("MalformedInputs_%v", tc.variant), func(t *testing.T) { + var ( + ids = []strfmt.UUID{"10", "20", "30"} + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs1 = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + nil, + objectEx(ids[2], 6, shard, "A"), + } + // BelongToShard and BelongToNode are empty + xs2 = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + {Object: models.Object{ID: ids[1]}}, + objectEx(ids[2], 6, shard, "A"), + } + ) + + assert.Nil(t, finder.CheckConsistency(ctx, types.ConsistencyLevelQuorum, nil)) + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelQuorum, xs1) + assert.NotNil(t, err) + + err = finder.CheckConsistency(ctx, types.ConsistencyLevelQuorum, xs2) + 
assert.NotNil(t, err) + }) + + t.Run(fmt.Sprintf("None_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs = []*storobj.Object{ + objectEx(ids[0], 1, shard, "A"), + objectEx(ids[1], 2, shard, "A"), + objectEx(ids[2], 3, shard, "A"), + } + digestR = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 3}, + } + ) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR, errAny) + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + want := setObjectsConsistency(xs, false) + assert.ErrorIs(t, err, replica.ErrRead) + assert.ElementsMatch(t, want, xs) + f.assertLogErrorContains(t, replica.ErrRead.Error()) + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs = []*storobj.Object{ + objectEx(ids[0], 1, shard, "A"), + objectEx(ids[1], 2, shard, "A"), + objectEx(ids[2], 3, shard, "A"), + } + digestR = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 3}, + } + want = setObjectsConsistency(xs, true) + ) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR, errAny) + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelQuorum, xs) + assert.Nil(t, err) + assert.ElementsMatch(t, want, xs) + }) + } +} + +func TestFinderCheckConsistencyOne(t *testing.T) { + var ( + ids = []strfmt.UUID{"10", "20", "30"} + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + 
isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("CheckConsistencyOne_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + objectEx(ids[1], 5, shard, "A"), + objectEx(ids[2], 6, shard, "A"), + } + want = setObjectsConsistency(xs, true) + ) + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelOne, xs) + assert.Nil(t, err) + assert.Equal(t, want, xs) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/mocks_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/mocks_test.go new file mode 100644 index 0000000000000000000000000000000000000000..905a89d14744733d0f488287f4fe4a3dff2ff528 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/mocks_test.go @@ -0,0 +1,139 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
//  CONTACT: hello@weaviate.io
//

package replica_test

import (
	"context"
	"time"

	"github.com/weaviate/weaviate/usecases/replica"

	"github.com/go-openapi/strfmt"
	"github.com/stretchr/testify/mock"
	"github.com/weaviate/weaviate/cluster/router/types"
	"github.com/weaviate/weaviate/entities/additional"
	"github.com/weaviate/weaviate/entities/filters"
	"github.com/weaviate/weaviate/entities/search"
	"github.com/weaviate/weaviate/entities/storobj"
	"github.com/weaviate/weaviate/usecases/objects"
	"github.com/weaviate/weaviate/usecases/replica/hashtree"
)

// fakeRClient is a testify mock of the replica read client used by the
// finder tests. Expectations are registered per method via f.On(...).
type fakeRClient struct {
	mock.Mock
}

// FetchObject mocks a full-object read from a single replica.
// Note: numRetries is intentionally NOT forwarded to Called, so test
// expectations match regardless of the retry count.
func (f *fakeRClient) FetchObject(ctx context.Context, host, index, shard string,
	id strfmt.UUID, props search.SelectProperties,
	additional additional.Properties, numRetries int,
) (replica.Replica, error) {
	args := f.Called(ctx, host, index, shard, id, props, additional)
	return args.Get(0).(replica.Replica), args.Error(1)
}

// FetchObjects mocks a batch full-object read from a single replica.
func (f *fakeRClient) FetchObjects(ctx context.Context, host, index,
	shard string, ids []strfmt.UUID,
) ([]replica.Replica, error) {
	args := f.Called(ctx, host, index, shard, ids)
	return args.Get(0).([]replica.Replica), args.Error(1)
}

// OverwriteObjects mocks pushing repaired versions to a replica.
func (f *fakeRClient) OverwriteObjects(ctx context.Context, host, index, shard string,
	xs []*objects.VObject,
) ([]types.RepairResponse, error) {
	args := f.Called(ctx, host, index, shard, xs)
	return args.Get(0).([]types.RepairResponse), args.Error(1)
}

// DigestObjects mocks a digest (update-time) read from a single replica.
// As with FetchObject, numRetries is deliberately excluded from Called.
func (f *fakeRClient) DigestObjects(ctx context.Context, host, index, shard string,
	ids []strfmt.UUID, numRetries int,
) ([]types.RepairResponse, error) {
	args := f.Called(ctx, host, index, shard, ids)
	return args.Get(0).([]types.RepairResponse), args.Error(1)
}

// FindUUIDs mocks a filtered UUID lookup on a replica.
func (f *fakeRClient) FindUUIDs(ctx context.Context, host, index, shard string,
	filters *filters.LocalFilter,
) ([]strfmt.UUID, error) {
	args := f.Called(ctx, host, index, shard, filters)
	return args.Get(0).([]strfmt.UUID), args.Error(1)
}

// DigestObjectsInRange mocks a ranged digest read (used by async repair).
func (f *fakeRClient) DigestObjectsInRange(ctx context.Context, host, index, shard string,
	initialUUID, finalUUID strfmt.UUID, limit int,
) ([]types.RepairResponse, error) {
	args := f.Called(ctx, host, index, shard, initialUUID, finalUUID, limit)
	return args.Get(0).([]types.RepairResponse), args.Error(1)
}

// HashTreeLevel mocks reading one level of a replica's merkle hash tree.
func (f *fakeRClient) HashTreeLevel(ctx context.Context,
	host, index, shard string, level int, discriminant *hashtree.Bitset,
) (digests []hashtree.Digest, err error) {
	args := f.Called(ctx, host, index, shard, level, discriminant)
	return args.Get(0).([]hashtree.Digest), args.Error(1)
}

// fakeClient is a testify mock of the replica write client (two-phase
// commit: per-operation call followed by Commit/Abort).
type fakeClient struct {
	mock.Mock
}

// PutObject mocks replicating a single-object insert.
func (f *fakeClient) PutObject(ctx context.Context, host, index, shard, requestID string,
	obj *storobj.Object, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, obj, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// DeleteObject mocks replicating a single-object delete.
func (f *fakeClient) DeleteObject(ctx context.Context, host, index, shard, requestID string,
	id strfmt.UUID, deletionTime time.Time, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, id, deletionTime, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// MergeObject mocks replicating a patch/merge operation.
func (f *fakeClient) MergeObject(ctx context.Context, host, index, shard, requestID string,
	doc *objects.MergeDocument, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, doc, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// PutObjects mocks replicating a batch insert.
func (f *fakeClient) PutObjects(ctx context.Context, host, index, shard, requestID string,
	objs []*storobj.Object, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, objs, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// DeleteObjects mocks replicating a batch delete (optionally a dry run).
func (f *fakeClient) DeleteObjects(ctx context.Context, host, index, shard, requestID string,
	uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, uuids, deletionTime, dryRun, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// AddReferences mocks replicating a batch-reference operation.
func (f *fakeClient) AddReferences(ctx context.Context, host, index, shard, requestID string,
	refs []objects.BatchReference, schemaVersion uint64,
) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID, refs, schemaVersion)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}

// Commit mocks the second phase of a replicated write.
func (f *fakeClient) Commit(ctx context.Context, host, index, shard, requestID string, resp interface{}) error {
	args := f.Called(ctx, host, index, shard, requestID, resp)
	return args.Error(0)
}

// Abort mocks rolling back the first phase of a replicated write.
func (f *fakeClient) Abort(ctx context.Context, host, index, shard, requestID string) (replica.SimpleResponse, error) {
	args := f.Called(ctx, host, index, shard, requestID)
	return args.Get(0).(replica.SimpleResponse), args.Error(1)
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/remote_incoming.go b/platform/dbops/binaries/weaviate-src/usecases/replica/remote_incoming.go
new file mode 100644
index 0000000000000000000000000000000000000000..b736a51cc5741c251fc338222121720de7eb9ce8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/replica/remote_incoming.go
@@ -0,0 +1,229 @@
//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package replica

import (
	"context"
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
	"github.com/weaviate/weaviate/cluster/router/types"
	"github.com/weaviate/weaviate/entities/schema"
	"github.com/weaviate/weaviate/entities/storobj"
	"github.com/weaviate/weaviate/usecases/objects"
	"github.com/weaviate/weaviate/usecases/replica/hashtree"
)

// RemoteIncomingRepo resolves the local index that should serve an
// incoming replication request for a given class.
type RemoteIncomingRepo interface {
	GetIndexForIncomingReplica(className schema.ClassName) RemoteIndexIncomingRepo
}

// RemoteIncomingSchema provides schema-version synchronization for
// incoming writes.
type RemoteIncomingSchema interface {
	// WaitForUpdate ensures that the local schema has caught up to schemaVersion
	WaitForUpdate(ctx context.Context, schemaVersion uint64) error
}

// RemoteIndexIncomingRepo is the per-index surface that incoming
// replication requests are delegated to.
type RemoteIndexIncomingRepo interface {
	// Write endpoints
	ReplicateObject(ctx context.Context, shardName, requestID string, object *storobj.Object) SimpleResponse
	ReplicateObjects(ctx context.Context, shardName, requestID string, objects []*storobj.Object, schemaVersion uint64) SimpleResponse
	ReplicateUpdate(ctx context.Context, shardName, requestID string, mergeDoc *objects.MergeDocument) SimpleResponse
	ReplicateDeletion(ctx context.Context, shardName, requestID string, uuid strfmt.UUID, deletionTime time.Time) SimpleResponse
	ReplicateDeletions(ctx context.Context, shardName, requestID string, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64) SimpleResponse
	ReplicateReferences(ctx context.Context, shardName, requestID string, refs []objects.BatchReference) SimpleResponse
	CommitReplication(shardName, requestID string) interface{}
	AbortReplication(shardName, requestID string) interface{}
	OverwriteObjects(ctx context.Context, shard string, vobjects []*objects.VObject) ([]types.RepairResponse, error)
	// Read endpoints
	FetchObject(ctx context.Context, shardName string, id strfmt.UUID) (Replica, error)
	FetchObjects(ctx context.Context, shardName string, ids []strfmt.UUID) ([]Replica, error)
	DigestObjects(ctx context.Context, shardName string, ids []strfmt.UUID) (result []types.RepairResponse, err error)
	DigestObjectsInRange(ctx context.Context, shardName string,
		initialUUID, finalUUID strfmt.UUID, limit int) (result []types.RepairResponse, err error)
	HashTreeLevel(ctx context.Context, shardName string,
		level int, discriminant *hashtree.Bitset) (digests []hashtree.Digest, err error)
}

// RemoteReplicaIncoming receives replication requests from peer nodes,
// resolves the target local index, and delegates to it. Writes first wait
// for the local schema to reach the sender's schema version.
type RemoteReplicaIncoming struct {
	repo   RemoteIncomingRepo
	schema RemoteIncomingSchema
}

// NewRemoteReplicaIncoming constructs the incoming-replication handler.
func NewRemoteReplicaIncoming(repo RemoteIncomingRepo, schema RemoteIncomingSchema) *RemoteReplicaIncoming {
	return &RemoteReplicaIncoming{
		schema: schema,
		repo:   repo,
	}
}

// ReplicateObject handles an incoming single-object insert.
func (rri *RemoteReplicaIncoming) ReplicateObject(ctx context.Context, indexName,
	shardName, requestID string, object *storobj.Object, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateObject(ctx, shardName, requestID, object)
}

// ReplicateObjects handles an incoming batch insert.
func (rri *RemoteReplicaIncoming) ReplicateObjects(ctx context.Context, indexName,
	shardName, requestID string, objects []*storobj.Object, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateObjects(ctx, shardName, requestID, objects, schemaVersion)
}

// ReplicateUpdate handles an incoming merge/patch.
func (rri *RemoteReplicaIncoming) ReplicateUpdate(ctx context.Context, indexName,
	shardName, requestID string, mergeDoc *objects.MergeDocument, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateUpdate(ctx, shardName, requestID, mergeDoc)
}

// ReplicateDeletion handles an incoming single-object delete.
func (rri *RemoteReplicaIncoming) ReplicateDeletion(ctx context.Context, indexName,
	shardName, requestID string, uuid strfmt.UUID, deletionTime time.Time, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateDeletion(ctx, shardName, requestID, uuid, deletionTime)
}

// ReplicateDeletions handles an incoming batch delete (optionally dry-run).
func (rri *RemoteReplicaIncoming) ReplicateDeletions(ctx context.Context, indexName,
	shardName, requestID string, uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateDeletions(ctx, shardName, requestID, uuids, deletionTime, dryRun, schemaVersion)
}

// ReplicateReferences handles an incoming batch-reference operation.
func (rri *RemoteReplicaIncoming) ReplicateReferences(ctx context.Context, indexName,
	shardName, requestID string, refs []objects.BatchReference, schemaVersion uint64,
) SimpleResponse {
	index, simpleResp := rri.indexForIncomingWrite(ctx, indexName, schemaVersion)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.ReplicateReferences(ctx, shardName, requestID, refs)
}

// CommitReplication finalizes a previously prepared replication request.
func (rri *RemoteReplicaIncoming) CommitReplication(indexName,
	shardName, requestID string,
) interface{} {
	index, simpleResp := rri.indexForIncomingRead(context.Background(), indexName)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.CommitReplication(shardName, requestID)
}

// AbortReplication rolls back a previously prepared replication request.
func (rri *RemoteReplicaIncoming) AbortReplication(indexName,
	shardName, requestID string,
) interface{} {
	index, simpleResp := rri.indexForIncomingRead(context.Background(), indexName)
	if simpleResp != nil {
		return *simpleResp
	}
	return index.AbortReplication(shardName, requestID)
}

// OverwriteObjects applies repaired object versions to the local replica.
func (rri *RemoteReplicaIncoming) OverwriteObjects(ctx context.Context,
	indexName, shardName string, vobjects []*objects.VObject,
) ([]types.RepairResponse, error) {
	index, simpleResp := rri.indexForIncomingRead(ctx, indexName)
	if simpleResp != nil {
		return nil, simpleResp.Errors[0].Err
	}
	return index.OverwriteObjects(ctx, shardName, vobjects)
}

// FetchObject serves a single-object read for a remote coordinator.
func (rri *RemoteReplicaIncoming) FetchObject(ctx context.Context,
	indexName, shardName string, id strfmt.UUID,
) (Replica, error) {
	index, simpleResp := rri.indexForIncomingRead(ctx, indexName)
	if simpleResp != nil {
		return Replica{}, simpleResp.Errors[0].Err
	}
	return index.FetchObject(ctx, shardName, id)
}

// FetchObjects serves a batch read for a remote coordinator.
func (rri *RemoteReplicaIncoming) FetchObjects(ctx context.Context,
	indexName, shardName string, ids []strfmt.UUID,
) ([]Replica, error) {
	index, simpleResp := rri.indexForIncomingRead(ctx, indexName)
	if simpleResp != nil {
		return []Replica{}, simpleResp.Errors[0].Err
	}
	return index.FetchObjects(ctx, shardName, ids)
}

// DigestObjects serves a digest (update-time) read for a remote coordinator.
func (rri *RemoteReplicaIncoming) DigestObjects(ctx context.Context,
	indexName, shardName string, ids []strfmt.UUID,
) (result []types.RepairResponse, err error) {
	index, simpleResp := rri.indexForIncomingRead(ctx, indexName)
	if simpleResp != nil {
		return []types.RepairResponse{}, simpleResp.Errors[0].Err
	}
	return index.DigestObjects(ctx, shardName, ids)
}

// indexForIncomingRead resolves the local index for a read; returns a
// SimpleResponse error when the index is unknown on this node.
// NOTE(review): ctx is currently unused here — kept for signature symmetry
// with indexForIncomingWrite.
func (rri *RemoteReplicaIncoming) indexForIncomingRead(ctx context.Context, indexName string) (RemoteIndexIncomingRepo, *SimpleResponse) {
	index := rri.repo.GetIndexForIncomingReplica(schema.ClassName(indexName))
	if index == nil {
		return nil, &SimpleResponse{Errors: []Error{{Err: fmt.Errorf("local index %q not found", indexName)}}}
	}
	return index, nil
}

// indexForIncomingWrite waits for the local schema to reach schemaVersion,
// then resolves the local index.
// (definition continues past this chunk — body below is the visible head)
func (rri *RemoteReplicaIncoming) indexForIncomingWrite(ctx context.Context, indexName string,
	schemaVersion uint64,
) (RemoteIndexIncomingRepo, *SimpleResponse) {
	if err := rri.schema.WaitForUpdate(ctx, schemaVersion); err != nil {
		return nil, &SimpleResponse{Errors: []Error{{Err: fmt.Errorf("error waiting for schema version %d: %w", schemaVersion, err)}}}
	}
	index := rri.repo.GetIndexForIncomingReplica(schema.ClassName(indexName))
	if index == nil {
		return nil,
&SimpleResponse{Errors: []Error{{Err: fmt.Errorf("local index %q not found", indexName)}}} + } + return index, nil +} + +func (rri *RemoteReplicaIncoming) DigestObjectsInRange(ctx context.Context, + indexName, shardName string, initialUUID, finalUUID strfmt.UUID, limit int, +) (result []types.RepairResponse, err error) { + index, simpleResp := rri.indexForIncomingRead(ctx, indexName) + if simpleResp != nil { + return []types.RepairResponse{}, simpleResp.Errors[0].Err + } + return index.DigestObjectsInRange(ctx, shardName, initialUUID, finalUUID, limit) +} + +func (rri *RemoteReplicaIncoming) HashTreeLevel(ctx context.Context, + indexName, shardName string, level int, discriminant *hashtree.Bitset, +) (digests []hashtree.Digest, err error) { + index, simpleResp := rri.indexForIncomingRead(ctx, indexName) + if simpleResp != nil { + return []hashtree.Digest{}, simpleResp.Errors[0].Err + } + + return index.HashTreeLevel(ctx, shardName, level, discriminant) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/repairer.go b/platform/dbops/binaries/weaviate-src/usecases/replica/repairer.go new file mode 100644 index 0000000000000000000000000000000000000000..f030d797386eeba4a6f74e57e05992d254392761 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/repairer.go @@ -0,0 +1,545 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
//
// CONTACT: hello@weaviate.io
//

package replica

import (
	"context"
	"errors"
	"fmt"
	"sort"

	"github.com/weaviate/weaviate/entities/models"

	"github.com/sirupsen/logrus"
	enterrors "github.com/weaviate/weaviate/entities/errors"

	"github.com/go-openapi/strfmt"
	"github.com/weaviate/weaviate/entities/additional"
	"github.com/weaviate/weaviate/entities/search"
	"github.com/weaviate/weaviate/entities/storobj"
	"github.com/weaviate/weaviate/usecases/objects"
)

var (
	// ErrConflictExistOrDeleted: object exists on one replica but is deleted on the other.
	//
	// Whether deletion may be propagated depends on the order of operations:
	// Created -> Deleted            => safe to propagate deletion to all replicas
	// Created -> Deleted -> Created => propagating deletion would result in data loss
	ErrConflictExistOrDeleted = errors.New("conflict: object has been deleted on another replica")

	// ErrConflictObjectChanged object changed since last time and cannot be repaired
	ErrConflictObjectChanged = errors.New("source object changed during repair")
)

// repairer tries to detect inconsistencies and repair objects when reading them from replicas
type repairer struct {
	class               string        // class (index) name the repairs apply to
	getDeletionStrategy func() string // resolved lazily so config changes are picked up per call
	client              FinderClient  // needed to commit and abort operation
	logger              logrus.FieldLogger
}

// repairOne repairs a single object (used by Finder::GetOne).
// votes holds one (object, update-time) tuple per replica; contentIdx is the
// index of the vote that carries the full object content. On success it
// returns the winning (most recently updated) object after pushing it to all
// stale replicas.
func (r *repairer) repairOne(ctx context.Context,
	shard string,
	id strfmt.UUID,
	votes []ObjTuple,
	contentIdx int,
) (_ *storobj.Object, err error) {
	var (
		deleted          bool  // true if any replica reports the object as deleted
		deletionTime     int64 // most recent deletion timestamp across replicas
		lastUTime        int64 // most recent update timestamp across replicas
		winnerIdx        int   // index of the vote with the most recent update
		cl               = r.client
		deletionStrategy = r.getDeletionStrategy()
	)

	// First pass: find the winner (latest update time) and the latest deletion.
	for i, x := range votes {
		if x.O.Deleted {
			deleted = true

			if x.UTime > deletionTime {
				deletionTime = x.UTime
			}
		}
		if x.UTime > lastUTime {
			lastUTime = x.UTime
			winnerIdx = i
		}
	}

	// Deletion wins unconditionally under the delete-on-conflict strategy:
	// propagate the tombstone to every replica that has not seen it yet.
	if deleted && deletionStrategy == models.ReplicationConfigDeletionStrategyDeleteOnConflict {
		gr := enterrors.NewErrorGroupWrapper(r.logger)
		for _, vote := range votes {
			if vote.O.Deleted && vote.UTime == deletionTime {
				continue // this replica already holds the newest tombstone
			}

			vote := vote // capture loop variable for the goroutine (pre-Go 1.22 semantics)

			gr.Go(func() error {
				ups := []*objects.VObject{{
					ID:                      id,
					Deleted:                 true,
					LastUpdateTimeUnixMilli: deletionTime,
					StaleUpdateTime:         vote.UTime,
				}}
				resp, err := cl.Overwrite(ctx, vote.Sender, r.class, shard, ups)
				if err != nil {
					return fmt.Errorf("node %q could not repair deleted object: %w", vote.Sender, err)
				}
				if len(resp) > 0 && resp[0].Err != "" {
					return fmt.Errorf("overwrite deleted object %w %s: %s", ErrConflictObjectChanged, vote.Sender, resp[0].Err)
				}
				return nil
			})
		}

		return nil, gr.Wait()
	}

	// Without time-based resolution a mixed exist/deleted state is unresolvable.
	if deleted && deletionStrategy != models.ReplicationConfigDeletionStrategyTimeBasedResolution {
		return nil, ErrConflictExistOrDeleted
	}

	// fetch most recent object
	updates := votes[contentIdx].O
	winner := votes[winnerIdx]

	if updates.UpdateTime() != lastUTime {
		// The content we already have is stale; re-read the full object from
		// the winner. NOTE(review): the trailing literal 9 is an undocumented
		// argument to FullRead — presumably a version/consistency parameter;
		// confirm against FinderClient.FullRead.
		updates, err = cl.FullRead(ctx, winner.Sender, r.class, shard, id,
			search.SelectProperties{}, additional.Properties{}, 9)
		if err != nil {
			return nil, fmt.Errorf("get most recent object from %s: %w", winner.Sender, err)
		}
		if updates.UpdateTime() != lastUTime {
			// NOTE(review): err is nil on this path (FullRead succeeded), so the
			// second %w wraps nil and renders as %!w(<nil>) — likely unintended.
			return nil, fmt.Errorf("fetch new state from %s: %w, %w", winner.Sender, ErrConflictObjectChanged, err)
		}
	}

	// Push the winning state to every replica that is behind.
	gr := enterrors.NewErrorGroupWrapper(r.logger)
	for _, vote := range votes { // repair
		if vote.UTime == lastUTime {
			continue // replica already up to date
		}

		vote := vote // capture loop variable for the goroutine

		gr.Go(func() error {
			var latestObject *models.Object
			var vector []float32
			var vectors map[string][]float32
			var multiVectors map[string][][]float32

			// Only attach content when the winner is not a tombstone.
			if !updates.Deleted {
				latestObject = &updates.Object.Object
				vector = updates.Object.Vector
				if updates.Object.Vectors != nil {
					vectors = make(map[string][]float32, len(updates.Object.Vectors))
					for targetVector, v := range updates.Object.Vectors {
						vectors[targetVector] = v
					}
				}
				if updates.Object.MultiVectors != nil {
					multiVectors = make(map[string][][]float32, len(updates.Object.MultiVectors))
					for targetVector, v := range updates.Object.MultiVectors {
						multiVectors[targetVector] = v
					}
				}
			}

			ups := []*objects.VObject{{
				ID:                      updates.ID,
				Deleted:                 updates.Deleted,
				LastUpdateTimeUnixMilli: updates.UpdateTime(),
				LatestObject:            latestObject,
				Vector:                  vector,
				Vectors:                 vectors,
				MultiVectors:            multiVectors,
				StaleUpdateTime:         vote.UTime,
			}}
			resp, err := cl.Overwrite(ctx, vote.Sender, r.class, shard, ups)
			if err != nil {
				return fmt.Errorf("node %q could not repair object: %w", vote.Sender, err)
			}
			if len(resp) > 0 && resp[0].Err != "" {
				// The target rejected the overwrite, e.g. because its copy
				// changed concurrently (optimistic-lock style mismatch).
				return fmt.Errorf("overwrite %w %s: %s", ErrConflictObjectChanged, vote.Sender, resp[0].Err)
			}
			return nil
		})
	}

	return updates.Object, gr.Wait()
}

// iTuple tuple of indices used to identify a unique object
type iTuple struct {
	S       int   // sender's index
	O       int   // object's index
	T       int64 // last update time
	Deleted bool
}

// repairExist repairs a single object when checking for existence.
// Same structure as repairOne, but only digests (update times) are voted on
// and the result is a bool: whether the winning state is a live object.
func (r *repairer) repairExist(ctx context.Context,
	shard string,
	id strfmt.UUID,
	votes []BoolTuple,
) (_ bool, err error) {
	var (
		deleted          bool  // true if any replica reports the object as deleted
		deletionTime     int64 // most recent deletion timestamp across replicas
		lastUTime        int64 // most recent update timestamp across replicas
		winnerIdx        int   // index of the vote with the most recent update
		cl               = r.client
		deletionStrategy = r.getDeletionStrategy()
	)

	// Find the winner (latest update time) and the latest deletion.
	for i, x := range votes {
		if x.O.Deleted {
			deleted = true

			if x.UTime > deletionTime {
				deletionTime = x.UTime
			}
		}
		if x.UTime > lastUTime {
			lastUTime = x.UTime
			winnerIdx = i
		}
	}

	// Deletion wins under delete-on-conflict: propagate the tombstone.
	if deleted && deletionStrategy == models.ReplicationConfigDeletionStrategyDeleteOnConflict {
		gr := enterrors.NewErrorGroupWrapper(r.logger)

		for _, vote := range votes {
			if vote.O.Deleted && vote.UTime == deletionTime {
				continue // replica already holds the newest tombstone
			}

			vote := vote // capture loop variable for the goroutine

			gr.Go(func() error {
				ups := []*objects.VObject{{
					ID:                      id,
					Deleted:                 true,
					LastUpdateTimeUnixMilli: deletionTime,
					StaleUpdateTime:         vote.UTime,
				}}
				resp, err := cl.Overwrite(ctx, vote.Sender, r.class, shard, ups)
				if err != nil {
					return fmt.Errorf("node %q could not repair deleted object: %w", vote.Sender, err)
				}
				if len(resp) > 0 && resp[0].Err != "" {
					return fmt.Errorf("overwrite deleted object %w %s: %s", ErrConflictObjectChanged, vote.Sender, resp[0].Err)
				}
				return nil
			})
		}

		return false, gr.Wait()
	}

	// Without time-based resolution a mixed exist/deleted state is unresolvable.
	if deleted && deletionStrategy != models.ReplicationConfigDeletionStrategyTimeBasedResolution {
		return false, ErrConflictExistOrDeleted
	}

	// fetch most recent object
	// NOTE(review): the trailing literal 9 mirrors the one in repairOne —
	// confirm its meaning against FinderClient.FullRead.
	winner := votes[winnerIdx]
	resp, err := cl.FullRead(ctx, winner.Sender, r.class, shard, id, search.SelectProperties{}, additional.Properties{}, 9)
	if err != nil {
		return false, fmt.Errorf("get most recent object from %s: %w", winner.Sender, err)
	}
	if resp.UpdateTime() != lastUTime {
		// NOTE(review): err is nil here, so the second %w wraps nil — likely unintended.
		return false, fmt.Errorf("fetch new state from %s: %w, %w", winner.Sender, ErrConflictObjectChanged, err)
	}

	// NOTE(review): unlike repairOne this uses the context-aware group wrapper;
	// the asymmetry between the two functions looks accidental — confirm.
	gr, ctx := enterrors.NewErrorGroupWithContextWrapper(r.logger, ctx)

	for _, vote := range votes { // repair
		if vote.UTime == lastUTime {
			continue // replica already up to date
		}

		vote := vote // capture loop variable for the goroutine

		gr.Go(func() error {
			var latestObject *models.Object
			var vector []float32
			var vectors map[string][]float32
			var multiVectors map[string][][]float32

			// Only attach content when the winner is not a tombstone.
			if !resp.Deleted {
				latestObject = &resp.Object.Object
				vector = resp.Object.Vector
				if resp.Object.Vectors != nil {
					vectors = make(map[string][]float32, len(resp.Object.Vectors))
					for targetVector, v := range resp.Object.Vectors {
						vectors[targetVector] = v
					}
				}
				if resp.Object.MultiVectors != nil {
					multiVectors = make(map[string][][]float32, len(resp.Object.MultiVectors))
					for targetVector, v := range resp.Object.MultiVectors {
						multiVectors[targetVector] = v
					}
				}
			}

			ups := []*objects.VObject{{
				ID:                      resp.ID,
				Deleted:                 resp.Deleted,
				LastUpdateTimeUnixMilli: resp.UpdateTime(),
				LatestObject:            latestObject,
				Vector:                  vector,
				Vectors:                 vectors,
				MultiVectors:            multiVectors,
				StaleUpdateTime:         vote.UTime,
			}}

			// This resp shadows the outer FullRead result inside the closure only.
			resp, err := cl.Overwrite(ctx, vote.Sender, r.class, shard, ups)
			if err != nil {
				return fmt.Errorf("node %q could not repair object: %w", vote.Sender, err)
			}
			if len(resp) > 0 && resp[0].Err != "" {
				return fmt.Errorf("overwrite %w %s: %s", ErrConflictObjectChanged, vote.Sender, resp[0].Err)
			}

			return nil
		})
	}

	return !resp.Deleted, gr.Wait()
}

// repairBatchPart repairs objects when reading them (use in combination with Finder::GetAll).
// votes[contentIdx] carries full objects; the remaining votes carry digests.
// It returns, per id, the winning object (or nil where no consistent winner
// could be established), repairing stale replicas along the way.
func (r *repairer) repairBatchPart(ctx context.Context,
	shard string,
	ids []strfmt.UUID,
	votes []Vote,
	contentIdx int,
) ([]*storobj.Object, error) {
	var (
		result            = make([]*storobj.Object, len(ids)) // final result
		lastTimes         = make([]iTuple, len(ids))          // most recent times
		lastDeletionTimes = make([]int64, len(ids))           // most recent deletion times
		ms                = make([]iTuple, 0, len(ids))       // mismatches
		cl                = r.client
		nVotes            = len(votes)
		// The input objects cannot be used for repair because
		// their attributes might have been filtered out
		reFetchSet       = make(map[int]struct{})
		deletionStrategy = r.getDeletionStrategy()
	)

	// find most recent objects
	for i, x := range votes[contentIdx].FullData {
		lastTimes[i] = iTuple{S: contentIdx, O: i, T: x.UpdateTime(), Deleted: x.Deleted}
		if x.Deleted {
			lastDeletionTimes[i] = x.UpdateTime()
		}
		votes[contentIdx].Count[i] = nVotes // reuse Count[] to check consistency
	}

	// Merge in the digest votes: track, per object, the newest update and
	// the newest deletion seen on any replica.
	for i, vote := range votes {
		if i != contentIdx {
			for j, x := range vote.DigestData {
				if curTime := lastTimes[j].T; x.UpdateTime > curTime {
					// input object is not up to date
					lastTimes[j] = iTuple{S: i, O: j, T: x.UpdateTime}
					reFetchSet[j] = struct{}{} // we need to fetch this object again
				}

				lastTimes[j].Deleted = lastTimes[j].Deleted || x.Deleted

				if x.Deleted && x.UpdateTime > lastDeletionTimes[j] {
					lastDeletionTimes[j] = x.UpdateTime
				}

				votes[i].Count[j] = nVotes
			}
		}
	}

	// find missing content (diff)
	for i, p := range votes[contentIdx].FullData {
		if lastTimes[i].Deleted && lastDeletionTimes[i] == lastTimes[i].T {
			continue // newest state is a tombstone; nothing to return
		}

		if _, ok := reFetchSet[i]; ok {
			ms = append(ms, lastTimes[i]) // our copy is stale; re-fetch from the winner
		} else {
			result[i] = p.Object // our copy is already the newest
		}
	}

	if len(ms) > 0 { // fetch most recent objects
		// partition by hostname
		sort.SliceStable(ms, func(i, j int) bool { return ms[i].S < ms[j].S })
		partitions := make([]int, 0, len(votes))
		pre := ms[0].S
		for i, y := range ms {
			if y.S != pre {
				partitions = append(partitions, i)
				pre = y.S
			}
		}
		partitions = append(partitions, len(ms))

		// concurrent fetches
		gr, ctx := enterrors.NewErrorGroupWithContextWrapper(r.logger, ctx)
		start := 0
		for _, end := range partitions { // fetch diffs
			rid := ms[start].S
			receiver := votes[rid].Sender
			query := make([]strfmt.UUID, end-start)
			// This loop advances the outer `start` to `end` while filling query.
			for j := 0; start < end; start++ {
				query[j] = ids[ms[start].O]
				j++
			}
			// Shadow `start` so the closure captures its current value (== end);
			// inside, ms[start-n+i] therefore addresses the partition's i-th entry.
			start := start
			gr.Go(func() error {
				// Failures are tolerated here: a failed or mismatching fetch only
				// decrements the consistency counter instead of aborting the batch.
				resp, err := cl.FullReads(ctx, receiver, r.class, shard, query)
				for i, n := 0, len(query); i < n; i++ {
					idx := ms[start-n+i].O
					if err != nil || lastTimes[idx].T != resp[i].UpdateTime() {
						votes[rid].Count[idx]--
					} else {
						result[idx] = resp[i].Object
					}
				}
				return nil
			})

		}
		if err := gr.Wait(); err != nil {
			return nil, err
		}
	}

	// concurrent repairs
	gr, ctx := enterrors.NewErrorGroupWithContextWrapper(r.logger, ctx)

	for rid, vote := range votes {
		query := make([]*objects.VObject, 0, len(ids)/2)
		m := make(map[string]int, len(ids)/2) // maps object ID -> index into ids/lastTimes

		for j, x := range lastTimes {
			if !x.Deleted && result[j] == nil {
				// latest object could not be fetched
				continue
			}

			// Under delete-on-conflict, any observed deletion is propagated.
			if x.Deleted && deletionStrategy == models.ReplicationConfigDeletionStrategyDeleteOnConflict {
				alreadyDeleted := false

				if rid == contentIdx {
					alreadyDeleted = vote.BatchReply.FullData[j].Deleted
				} else {
					alreadyDeleted = vote.BatchReply.DigestData[j].Deleted
				}

				if alreadyDeleted && lastDeletionTimes[j] == vote.UpdateTimeAt(j) {
					continue // replica already holds the newest tombstone
				}

				obj := objects.VObject{
					ID:                      ids[j],
					Deleted:                 true,
					LastUpdateTimeUnixMilli: lastDeletionTimes[j],
					StaleUpdateTime:         vote.UpdateTimeAt(j),
				}
				query = append(query, &obj)
				m[string(ids[j])] = j

				continue
			}

			if x.Deleted && deletionStrategy != models.ReplicationConfigDeletionStrategyTimeBasedResolution {
				// note: conflict is not resolved
				continue
			}

			cTime := vote.UpdateTimeAt(j)

			// Repair only when this replica is stale AND all fetches for the
			// object succeeded (Count[j] still equals the number of votes).
			if x.T != cTime && vote.Count[j] == nVotes {
				var latestObject *models.Object
				var vector []float32
				var vectors map[string][]float32
				var multiVectors map[string][][]float32

				deleted := x.Deleted && lastDeletionTimes[j] == x.T

				// Only attach content when the winner is not a tombstone.
				if !deleted {
					latestObject = &result[j].Object
					vector = result[j].Vector
					if result[j].Vectors != nil {
						vectors = make(map[string][]float32, len(result[j].Vectors))
						for targetVector, v := range result[j].Vectors {
							vectors[targetVector] = v
						}
					}
					if result[j].MultiVectors != nil {
						multiVectors = make(map[string][][]float32, len(result[j].MultiVectors))
						for targetVector, v := range result[j].MultiVectors {
							multiVectors[targetVector] = v
						}
					}
				}

				obj := objects.VObject{
					ID:                      ids[j],
					Deleted:                 deleted,
					LastUpdateTimeUnixMilli: x.T,
					LatestObject:            latestObject,
					Vector:                  vector,
					Vectors:                 vectors,
					MultiVectors:            multiVectors,
					StaleUpdateTime:         cTime,
				}
				query = append(query, &obj)
				m[string(ids[j])] = j
			}
		}

		if len(query) == 0 {
			continue
		}

		receiver := vote.Sender
		rid := rid // capture loop variable for the goroutine

		gr.Go(func() error {
			// Overwrite failures are absorbed: they only lower the per-object
			// consistency counters, never fail the whole batch.
			rs, err := cl.Overwrite(ctx, receiver, r.class, shard, query)
			if err != nil {
				for _, idx := range m {
					votes[rid].Count[idx]--
				}
				return nil
			}
			for _, r := range rs {
				if r.Err != "" {
					if idx, ok := m[r.ID]; ok {
						votes[rid].Count[idx]--
					}
				}
			}
			return nil
		})
	}
+ + return result, gr.Wait() +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/repairer_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/repairer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8ee8ee15a7b26a9390e29226c6c49c1ba93e83ef --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/repairer_test.go @@ -0,0 +1,1548 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "context" + "fmt" + "testing" + + "github.com/weaviate/weaviate/usecases/replica" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/additional" + "github.com/weaviate/weaviate/entities/search" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +func TestRepairerOneWithALL(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + nilObject *storobj.Object + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + { + variant: "MultiTenant", + isMultiTenant: true, + }, + { + variant: "SingleTenant", + isMultiTenant: false, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("GetContentFromDirectRead_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + 
digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + updates := []*objects.VObject{{ + ID: id, + Deleted: false, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, // todo set when implemented + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, updates).Return(digestR2, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.NoError(t, err) + require.Equal(t, item.Object, got) + }) + + t.Run(fmt.Sprintf("ChangedObject_%v", tc.variant), func(t *testing.T) { + vectors := map[string][]float32{"test": {1, 2, 3}} + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: objectWithVectors(id, 3, vectors)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + digestR4 = []types.RepairResponse{{ID: id.String(), UpdateTime: 4, Err: "conflict"}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + updates := []*objects.VObject{{ + ID: id, + Deleted: false, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + Vectors: vectors, + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, 
updates).Return(digestR4, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.Error(t, err) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Nil(t, got) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + f.assertLogContains(t, "msg", "A:3", "B:2", "C:3") + f.assertLogErrorContains(t, "conflict") + }) + + t.Run(fmt.Sprintf("GetContentFromIndirectRead_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item2 = replica.Replica{ID: id, Object: object(id, 2)} + item3 = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item3, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item3, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). 
+ Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item3.Object.Object, updates.LatestObject) + } + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.Nil(t, err) + require.Equal(t, item3.Object, got) + }) + + t.Run(fmt.Sprintf("OverwriteError_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + updates := []*objects.VObject{{ + ID: id, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, updates).Return(digestR2, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.ErrorContains(t, err, replica.MsgCLevel) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.Nil(t, got) + f.assertLogContains(t, "msg", "A:3", "B:2", "C:3") + }) + + t.Run(fmt.Sprintf("CannotGetMostRecentObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item1 = replica.Replica{ID: id, Object: object(id, 1)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 
3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(emptyItem, errAny) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Nil(t, got) + f.assertLogContains(t, "msg", "A:1", "B:2", "C:3") + }) + t.Run(fmt.Sprintf("MostRecentObjectChanged_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item1 = replica.Replica{ID: id, Object: object(id, 1)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds). 
+ Return(item1, nil).Once() + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.ErrorContains(t, err, replica.MsgCLevel) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.Nil(t, got) + f.assertLogContains(t, "msg", "A:1", "B:2", "C:3") + f.assertLogErrorContains(t, replica.ErrConflictObjectChanged.Error()) + }) + + t.Run(fmt.Sprintf("CreateMissingObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 0, Deleted: false}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). 
+ Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(0), updates.StaleUpdateTime) + require.Equal(t, &item.Object.Object, updates.LatestObject) + } + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.Nil(t, err) + require.Equal(t, item.Object, got) + }) + t.Run(fmt.Sprintf("ConflictDeletedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: nil, Deleted: true} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, nilObject, got) + f.assertLogErrorContains(t, replica.ErrConflictExistOrDeleted.Error()) + }) + t.Run(fmt.Sprintf("NoConflictDeletedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: nil, LastUpdateTimeUnixMilli: 3, Deleted: true} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: true}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: true}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, 
digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + got, err := finder.GetOne(ctx, types.ConsistencyLevelAll, shard, id, proj, adds) + require.NoError(t, err) + require.Equal(t, nilObject, got) + }) + } +} + +func TestRepairerExistsWithALL(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + { + variant: "MultiTenant", + isMultiTenant: true, + }, + { + variant: "SingleTenant", + isMultiTenant: false, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("ChangedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + digestR4 = []types.RepairResponse{{ID: id.String(), UpdateTime: 4, Err: "conflict"}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // repair + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, nil) + + updates := []*objects.VObject{{ + ID: id, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, 
updates).Return(digestR4, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + + f.assertLogContains(t, "msg", "A:3", "B:2", "C:3") + f.assertLogErrorContains(t, "conflict") + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item3 = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item3, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item3, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). 
+ Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item3.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.Nil(t, err) + require.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("OverwriteError_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, nil) + + updates := []*objects.VObject{{ + ID: id, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, updates).Return(digestR2, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + + f.assertLogContains(t, "msg", "A:3", "B:2", "C:3") + f.assertLogErrorContains(t, errAny.Error()) + }) + + 
t.Run(fmt.Sprintf("CannotGetMostRecentObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR1 = []types.RepairResponse{{ID: id.String(), UpdateTime: 1}} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(emptyItem, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + + f.assertLogContains(t, "msg", "A:1", "B:2", "C:3") + f.assertLogErrorContains(t, errAny.Error()) + }) + t.Run(fmt.Sprintf("MostRecentObjectChanged_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item1 = replica.Replica{ID: id, Object: object(id, 1)} + digestR1 = []types.RepairResponse{{ID: id.String(), UpdateTime: 1}} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during 
reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item1, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + f.assertLogContains(t, "msg", "A:1", "B:2", "C:3") + f.assertLogErrorContains(t, replica.ErrConflictObjectChanged.Error()) + }) + + t.Run(fmt.Sprintf("CreateMissingObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2, Deleted: false}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + // it can fetch object from the first or third node + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(item, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). 
+ Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.Nil(t, err) + require.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("ConflictDeletedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + + digestR0 = []types.RepairResponse{{ID: id.String(), Deleted: true}} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR0, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelAll, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + f.assertLogErrorContains(t, replica.ErrConflictExistOrDeleted.Error()) + }) + } +} + +func TestRepairerExistsWithConsistencyLevelQuorum(t *testing.T) { + var ( + id = strfmt.UUID("123") + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + adds = additional.Properties{} + proj = search.SelectProperties{} + emptyItem = replica.Replica{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + { + variant: "MultiTenant", + isMultiTenant: true, + }, + { + variant: "SingleTenant", + isMultiTenant: false, + }, + } + + for _, tc := range testCases { + + t.Run(fmt.Sprintf("ChangedObject_%v", tc.variant), func(t *testing.T) { + 
var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + digestR4 = []types.RepairResponse{{ID: id.String(), UpdateTime: 4, Err: "conflict"}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR2, errAny) + + // repair + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + + updates := []*objects.VObject{{ + ID: id, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + }} + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, updates).Return(digestR4, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + f.assertLogContains(t, "msg", "A:3", "B:2") + f.assertLogErrorContains(t, "conflict") + }) + + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes[:2], tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item3 = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, 
digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR2, errAny) + + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item3, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). + Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item3.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.Nil(t, err) + require.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("OverwriteError_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes[:2], tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR2, errAny) + + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + + updates := []*objects.VObject{{ + ID: id, + LastUpdateTimeUnixMilli: 3, + LatestObject: &item.Object.Object, + StaleUpdateTime: 2, + Version: 0, + }} + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, updates).Return(digestR2, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, 
id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + f.assertLogContains(t, "msg", "A:3", "B:2") + f.assertLogErrorContains(t, errAny.Error()) + }) + + t.Run(fmt.Sprintf("CannotGetMostRecentObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + digestR1 = []types.RepairResponse{{ID: id.String(), UpdateTime: 1}} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR3, nil) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[2], cls, shard, id, proj, adds).Return(emptyItem, errAny) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + f.assertLogContains(t, "msg", "A:1", "C:3") + f.assertLogErrorContains(t, errAny.Error()) + }) + t.Run(fmt.Sprintf("MostRecentObjectChanged_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item1 = replica.Replica{ID: id, Object: object(id, 1)} + digestR1 = []types.RepairResponse{{ID: id.String(), UpdateTime: 1}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3}} + ) + + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR1, nil) + f.RClient.On("DigestObjects", anyVal, 
nodes[1], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, digestIDs).Return(digestR1, errAny) + // called during reparation to fetch the most recent object + f.RClient.On("FetchObject", anyVal, nodes[1], cls, shard, id, proj, adds).Return(item1, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + require.Equal(t, false, got) + + f.assertLogContains(t, "msg", "A:1", "B:3") + f.assertLogErrorContains(t, replica.ErrConflictObjectChanged.Error()) + }) + + t.Run(fmt.Sprintf("CreateMissingObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes[:2], tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + item = replica.Replica{ID: id, Object: object(id, 3)} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 2, Deleted: false}} + digestR3 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR3, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + + // it can fetch object from the first or third node + f.RClient.On("FetchObject", anyVal, nodes[0], cls, shard, id, proj, adds).Return(item, nil) + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). 
+ Return(digestR2, nil).RunFn = func(a mock.Arguments) { + updates := a[4].([]*objects.VObject)[0] + require.Equal(t, int64(2), updates.StaleUpdateTime) + require.Equal(t, &item.Object.Object, updates.LatestObject) + } + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.Nil(t, err) + require.Equal(t, true, got) + }) + + t.Run(fmt.Sprintf("ConflictDeletedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes[:2], tc.isMultiTenant) + finder = f.newFinder("A") + digestIDs = []strfmt.UUID{id} + + digestR0 = []types.RepairResponse{{ID: id.String(), UpdateTime: 0, Deleted: true}} + digestR2 = []types.RepairResponse{{ID: id.String(), UpdateTime: 3, Deleted: false}} + ) + f.RClient.On("DigestObjects", anyVal, nodes[0], cls, shard, digestIDs).Return(digestR0, nil) + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, digestIDs).Return(digestR2, nil) + + got, err := finder.Exists(ctx, types.ConsistencyLevelQuorum, shard, id) + require.ErrorContains(t, err, replica.ErrRepair.Error()) + require.ErrorContains(t, err, replica.MsgCLevel) + f.assertLogErrorContains(t, replica.ErrConflictExistOrDeleted.Error()) + require.Equal(t, false, got) + }) + } +} + +func TestRepairerCheckConsistencyAll(t *testing.T) { + var ( + ids = []strfmt.UUID{"01", "02", "03"} + cls = "C1" + shard = "S1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + { + variant: "MultiTenant", + isMultiTenant: true, + }, + { + variant: "SingleTenant", + isMultiTenant: false, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("GetMostRecentContent1_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + directR = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + objectEx(ids[1], 5, shard, "A"), + objectEx(ids[2], 6, shard, "A"), + } + directRe = 
[]replica.Replica{ + repl(ids[0], 4, false), + repl(ids[1], 5, false), + repl(ids[2], 6, false), + } + + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 4}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 0}, // doesn't exist + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 0}, // doesn't exist + {ID: ids[1].String(), UpdateTime: 5}, + {ID: ids[2].String(), UpdateTime: 3}, + } + want = setObjectsConsistency(directR, true) + ) + + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, anyVal).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, anyVal).Return(digestR3, nil) + // refresh + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids, got) + } + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(digestR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: ids[1], + LastUpdateTimeUnixMilli: 5, + LatestObject: &directR[1].Object, + StaleUpdateTime: 2, + }, + { + ID: ids[2], + LastUpdateTimeUnixMilli: 6, + LatestObject: &directR[2].Object, + StaleUpdateTime: 0, + }, + } + + require.ElementsMatch(t, want, got) + } + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(digestR2, nil). + Once(). 
+ RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: ids[0], + LastUpdateTimeUnixMilli: 4, + LatestObject: &directR[0].Object, + StaleUpdateTime: 0, + }, + { + ID: ids[2], + LastUpdateTimeUnixMilli: 6, + LatestObject: &directR[2].Object, + StaleUpdateTime: 3, + }, + } + require.ElementsMatch(t, want, got) + } + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, directR) + require.Nil(t, err) + require.Equal(t, want, directR) + }) + + t.Run(fmt.Sprintf("GetMostRecentContent2_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, cls, shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids = []strfmt.UUID{"1", "2", "3", "4", "5"} + result = []*storobj.Object{ + objectEx(ids[0], 2, shard, "A"), + objectEx(ids[1], 2, shard, "A"), + objectEx(ids[2], 3, shard, "A"), + objectEx(ids[3], 4, shard, "A"), + objectEx(ids[4], 3, shard, "A"), + } + + xs = []*storobj.Object{ + objectEx(ids[0], 1, shard, "A"), + objectEx(ids[1], 1, shard, "A"), + objectEx(ids[2], 2, shard, "A"), + objectEx(ids[3], 4, shard, "A"), // latest + objectEx(ids[4], 2, shard, "A"), + } + + directRe = []replica.Replica{ + repl(ids[3], 4, false), + } + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 2}, // latest + {ID: ids[1].String(), UpdateTime: 2}, // latest + {ID: ids[2].String(), UpdateTime: 1}, + {ID: ids[3].String(), UpdateTime: 1}, + {ID: ids[4].String(), UpdateTime: 1}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 3}, // latest + {ID: ids[3].String(), UpdateTime: 1}, + {ID: ids[4].String(), UpdateTime: 3}, // latest + } + directR2 = []replica.Replica{ + repl(ids[0], 2, false), + repl(ids[1], 2, false), + } + directR3 = []replica.Replica{ + repl(ids[2], 3, false), + repl(ids[4], 3, false), + } + want = setObjectsConsistency(result, true) + ) + 
f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR3, nil) + + // refetch objects + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids[3:4], got) + } + + // fetch most recent objects + f.RClient.On("FetchObjects", anyVal, nodes[1], cls, shard, anyVal).Return(directR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids[:2], got) + } + f.RClient.On("FetchObjects", anyVal, nodes[2], cls, shard, anyVal).Return(directR3, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, []strfmt.UUID{ids[2], ids[4]}, got) + } + + // repair + var ( + overwriteR1 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 2}, + {ID: ids[4].String(), UpdateTime: 2}, + } + overwriteR2 = []types.RepairResponse{ + {ID: ids[2].String(), UpdateTime: 1}, + {ID: ids[3].String(), UpdateTime: 1}, + {ID: ids[4].String(), UpdateTime: 1}, + } + overwriteR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[3].String(), UpdateTime: 1}, + } + ) + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). + Return(overwriteR1, nil). + Once(). 
+ RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: ids[0], + LastUpdateTimeUnixMilli: 2, + LatestObject: &result[0].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[1], + LastUpdateTimeUnixMilli: 2, + LatestObject: &result[1].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[2], + LastUpdateTimeUnixMilli: 3, + LatestObject: &result[2].Object, + StaleUpdateTime: 2, + }, + { + ID: ids[4], + LastUpdateTimeUnixMilli: 3, + LatestObject: &result[4].Object, + StaleUpdateTime: 2, + }, + } + + require.ElementsMatch(t, want, got) + } + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(overwriteR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: ids[2], + LastUpdateTimeUnixMilli: 3, + LatestObject: &result[2].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[3], + LastUpdateTimeUnixMilli: 4, + LatestObject: &result[3].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[4], + LastUpdateTimeUnixMilli: 3, + LatestObject: &result[4].Object, + StaleUpdateTime: 1, + }, + } + + require.ElementsMatch(t, want, got) + } + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(overwriteR3, nil). + Once(). 
+ RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: ids[0], + LastUpdateTimeUnixMilli: 2, + LatestObject: &result[0].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[1], + LastUpdateTimeUnixMilli: 2, + LatestObject: &result[1].Object, + StaleUpdateTime: 1, + }, + { + ID: ids[3], + LastUpdateTimeUnixMilli: 4, + LatestObject: &result[3].Object, + StaleUpdateTime: 1, + }, + } + require.ElementsMatch(t, want, got) + } + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + + t.Run(fmt.Sprintf("OverwriteChangedObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + objectEx(ids[1], 5, shard, "A"), + objectEx(ids[2], 6, shard, "A"), + } + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 4}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 3}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 5}, + {ID: ids[2].String(), UpdateTime: 3}, + } + directR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 4}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 1, Err: "conflict"}, // this one + } + + directRe = []replica.Replica{ + repl(ids[0], 4, false), + repl(ids[1], 5, false), + repl(ids[2], 6, false), + } + ) + want := setObjectsConsistency(xs, true) + want[2].IsConsistent = false + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR2, nil) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR3, nil) + + // refetch objects + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). 
+ RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids, got) + } + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(directR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: xs[1].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[1].Object.LastUpdateTimeUnix, + LatestObject: &xs[1].Object, + StaleUpdateTime: 2, + }, + { + ID: xs[2].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[2].Object.LastUpdateTimeUnix, + LatestObject: &xs[2].Object, + StaleUpdateTime: 3, + }, + } + + require.ElementsMatch(t, want, got) + } + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(digestR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: xs[0].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[0].Object.LastUpdateTimeUnix, + LatestObject: &xs[0].Object, + StaleUpdateTime: 1, + }, + { + ID: xs[2].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[2].Object.LastUpdateTimeUnix, + LatestObject: &xs[2].Object, + StaleUpdateTime: 3, + }, + } + require.ElementsMatch(t, want, got) + } + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + + t.Run(fmt.Sprintf("OverwriteError_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids = []strfmt.UUID{"1", "2", "3"} + xs = []*storobj.Object{ + objectEx(ids[0], 2, shard, "A"), + objectEx(ids[1], 3, shard, "A"), + objectEx(ids[2], 1, shard, "A"), + } + + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 3}, // latest + {ID: ids[2].String(), UpdateTime: 1}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), 
UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 4}, // latest + } + directR2 = []replica.Replica{ + repl(ids[1], 3, false), + } + directR3 = []replica.Replica{ + repl(ids[2], 4, false), + } + directRe = []replica.Replica{ + repl(ids[0], 2, false), + repl(ids[1], 3, false), + } + ) + + want := setObjectsConsistency([]*storobj.Object{ + xs[0], + directR2[0].Object, + xs[2], + }, false) + want[1].IsConsistent = true + want[1].BelongsToNode = "A" + want[1].BelongsToShard = shard + + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids). + Return(digestR2, nil). + Once() + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids). + Return(digestR3, nil). + Once() + + // refetch objects + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids[:2], got) + } + + // fetch most recent objects + f.RClient.On("FetchObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(directR2, nil). + Once() + f.RClient.On("FetchObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(directR3, nil). + Once() + // repair + var ( + repairR1 = []types.RepairResponse{ + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 1}, + } + + repairR2 = []types.RepairResponse(nil) + repairR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + } + ) + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). + Return(repairR1, nil). + Once() + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(repairR2, errAny). + Once() + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(repairR3, nil). 
+ Once() + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + + t.Run(fmt.Sprintf("DirectReadEmptyResponse_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids = []strfmt.UUID{"1", "2", "3"} + xs = []*storobj.Object{ + objectEx(ids[0], 2, shard, "A"), + objectEx(ids[1], 3, shard, "A"), + objectEx(ids[2], 1, shard, "A"), + } + + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 2}, + {ID: ids[1].String(), UpdateTime: 3}, // latest + {ID: ids[2].String(), UpdateTime: 1}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 2}, + {ID: ids[1].String(), UpdateTime: 3}, + {ID: ids[2].String(), UpdateTime: 4}, // latest + } + directR2 = []replica.Replica{ + repl(ids[1], 3, false), + } + directR3 = []replica.Replica{ + repl(ids[2], 4, false), + } + ) + + want := setObjectsConsistency(xs, true) + want[2].Object.LastUpdateTimeUnix = 4 + + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids). + Return(digestR2, nil). + Once() + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids). + Return(digestR3, nil). + Once() + + // fetch most recent objects + f.RClient.On("FetchObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(directR2, nil). + Once() + // response must at least contain one item + f.RClient.On("FetchObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(directR3, nil). + Once() + // repair + var ( + repairR1 = []types.RepairResponse{ + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 1}, + } + + repairR2 = []types.RepairResponse(nil) + ) + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). + Return(repairR1, nil). + Once() + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(repairR2, nil). 
+ Once() + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + + t.Run(fmt.Sprintf("DirectReadEUnexpectedResponse_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids = []strfmt.UUID{"1", "2", "3"} + xs = []*storobj.Object{ + objectEx(ids[0], 2, shard, "A"), + objectEx(ids[1], 3, shard, "A"), + objectEx(ids[2], 1, shard, "A"), + } + + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 2}, + {ID: ids[1].String(), UpdateTime: 3}, // latest + {ID: ids[2].String(), UpdateTime: 1}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 2}, + {ID: ids[1].String(), UpdateTime: 3}, + {ID: ids[2].String(), UpdateTime: 4}, // latest + } + directR2 = []replica.Replica{ + repl(ids[1], 3, false), + } + // unexpected response UpdateTime is 3 instead of 4 + directR3 = []replica.Replica{repl(ids[2], 3, false)} + + directRe = []replica.Replica{ + repl(ids[0], 2, false), + repl(ids[1], 3, false), + } + ) + + want := setObjectsConsistency(xs, true) + want[2].IsConsistent = false + + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids). + Return(digestR2, nil). + Once() + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids). + Return(digestR3, nil). + Once() + + // refetch + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids[:2], got) + } + + // fetch most recent objects + f.RClient.On("FetchObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(directR2, nil). + Once() + // response must at least contain one item + f.RClient.On("FetchObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(directR3, nil). 
+ Once() + // repair + var ( + repairR1 = []types.RepairResponse{ + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 1}, + } + + repairR2 = []types.RepairResponse(nil) + ) + f.RClient.On("OverwriteObjects", anyVal, nodes[0], cls, shard, anyVal). + Return(repairR1, nil). + Once() + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(repairR2, nil). + Once() + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + + t.Run(fmt.Sprintf("OrphanObject_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + ids = []strfmt.UUID{"1", "2", "3"} + xs = []*storobj.Object{ + objectEx(ids[0], 2, shard, "A"), + objectEx(ids[1], 3, shard, "A"), + objectEx(ids[2], 1, shard, "A"), + } + + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 3}, // latest + {ID: ids[2].String(), UpdateTime: 1, Deleted: true}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + {ID: ids[2].String(), UpdateTime: 4, Deleted: true}, // latest + } + directR2 = []replica.Replica{ + repl(ids[1], 3, false), + } + + directRe = []replica.Replica{ + repl(ids[0], 2, false), + repl(ids[1], 3, false), + } + ) + + want := setObjectsConsistency(xs, true) + want[2].IsConsistent = false // orphan + + f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids). + Return(digestR2, nil). + Once() + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids). + Return(digestR3, nil). + Once() + + // refetch + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). 
+ RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, ids[:2], got) + } + + // fetch most recent objects + f.RClient.On("FetchObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(directR2, nil). + Once() + // repair + var ( + repairR2 = []types.RepairResponse{ + {ID: ids[1].String(), UpdateTime: 1}, + } + + repairR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 1}, + } + ) + + f.RClient.On("OverwriteObjects", anyVal, nodes[1], cls, shard, anyVal). + Return(repairR2, nil). + Once() + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(repairR3, nil). + Once() + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelAll, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + } +} + +func TestRepairerCheckConsistencyQuorum(t *testing.T) { + var ( + ids = []strfmt.UUID{"10", "20", "30"} + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("CheckConsistencyQuorum_%v", tc.variant), func(t *testing.T) { + var ( + f = newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + finder = f.newFinder("A") + xs = []*storobj.Object{ + objectEx(ids[0], 4, shard, "A"), + objectEx(ids[1], 5, shard, "A"), + objectEx(ids[2], 6, shard, "A"), + } + digestR2 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 4}, + {ID: ids[1].String(), UpdateTime: 2}, + {ID: ids[2].String(), UpdateTime: 3}, + } + digestR3 = []types.RepairResponse{ + {ID: ids[0].String(), UpdateTime: 1}, + {ID: ids[1].String(), UpdateTime: 5}, + {ID: ids[2].String(), UpdateTime: 3}, + } + directRe = []replica.Replica{ + repl(ids[0], 4, false), + // repl(ids[1], 5, false), + repl(ids[2], 6, false), + } + want = setObjectsConsistency(xs, true) + ) + 
f.RClient.On("DigestObjects", anyVal, nodes[1], cls, shard, ids).Return(digestR2, errAny) + f.RClient.On("DigestObjects", anyVal, nodes[2], cls, shard, ids).Return(digestR3, nil) + + // refetch + f.RClient.On("FetchObjects", anyVal, nodes[0], cls, shard, anyVal).Return(directRe, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]strfmt.UUID) + require.ElementsMatch(t, []strfmt.UUID{ids[0], ids[2]}, got) + } + f.RClient.On("OverwriteObjects", anyVal, nodes[2], cls, shard, anyVal). + Return(digestR2, nil). + Once(). + RunFn = func(a mock.Arguments) { + got := a[4].([]*objects.VObject) + want := []*objects.VObject{ + { + ID: xs[0].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[0].Object.LastUpdateTimeUnix, + LatestObject: &xs[0].Object, + StaleUpdateTime: 1, + }, + { + ID: xs[2].ID(), + Deleted: false, + LastUpdateTimeUnixMilli: xs[2].Object.LastUpdateTimeUnix, + LatestObject: &xs[2].Object, + StaleUpdateTime: 3, + }, + } + require.ElementsMatch(t, want, got) + } + + err := finder.CheckConsistency(ctx, types.ConsistencyLevelQuorum, xs) + require.Nil(t, err) + require.Equal(t, want, xs) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/replica.go b/platform/dbops/binaries/weaviate-src/usecases/replica/replica.go new file mode 100644 index 0000000000000000000000000000000000000000..51964c1eccda79745f95f2e65e5f137775d0e6a9 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/replica.go @@ -0,0 +1,142 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "encoding/json" + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/entities/storobj" +) + +// Replica represents a replicated data item +type Replica struct { + ID strfmt.UUID `json:"id,omitempty"` + Deleted bool `json:"deleted"` + Object *storobj.Object `json:"object,omitempty"` + LastUpdateTimeUnixMilli int64 `json:"lastUpdateTimeUnixMilli"` +} + +// robjectMarshaler is a helper for the methods implementing encoding.BinaryMarshaler +// +// Because *storobj.Object has an optimized custom MarshalBinary method, that is what +// we want to use when serializing, rather than json.Marshal. This is just a thin +// wrapper around the storobj bytes resulting from the underlying call to MarshalBinary +type robjectMarshaler struct { + ID strfmt.UUID + Deleted bool + LastUpdateTimeUnixMilli int64 + Object []byte +} + +func (r *Replica) MarshalBinary() ([]byte, error) { + b := robjectMarshaler{ + ID: r.ID, + Deleted: r.Deleted, + LastUpdateTimeUnixMilli: r.LastUpdateTimeUnixMilli, + } + if r.Object != nil { + obj, err := r.Object.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("marshal object: %w", err) + } + b.Object = obj + } + + return json.Marshal(b) +} + +func (r *Replica) UnmarshalBinary(data []byte) error { + var b robjectMarshaler + + err := json.Unmarshal(data, &b) + if err != nil { + return err + } + r.ID = b.ID + r.Deleted = b.Deleted + r.LastUpdateTimeUnixMilli = b.LastUpdateTimeUnixMilli + + if b.Object != nil { + var obj storobj.Object + err = obj.UnmarshalBinary(b.Object) + if err != nil { + return fmt.Errorf("unmarshal object: %w", err) + } + r.Object = &obj + } + + return nil +} + +type Replicas []Replica + +func (ro Replicas) MarshalBinary() ([]byte, error) { + ms := make([]robjectMarshaler, len(ro)) + + for i, obj := range ro { + m := robjectMarshaler{ + ID: obj.ID, + Deleted: obj.Deleted, + LastUpdateTimeUnixMilli: obj.LastUpdateTimeUnixMilli, + } + if 
obj.Object != nil { + b, err := obj.Object.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("marshal object %q: %w", obj.ID, err) + } + m.Object = b + } + ms[i] = m + } + + return json.Marshal(ms) +} + +func (ro *Replicas) UnmarshalBinary(data []byte) error { + var ms []robjectMarshaler + + err := json.Unmarshal(data, &ms) + if err != nil { + return err + } + + reps := make(Replicas, len(ms)) + for i, m := range ms { + rep := Replica{ + ID: m.ID, + Deleted: m.Deleted, + LastUpdateTimeUnixMilli: m.LastUpdateTimeUnixMilli, + } + if m.Object != nil { + var obj storobj.Object + err = obj.UnmarshalBinary(m.Object) + if err != nil { + return fmt.Errorf("unmarshal object %q: %w", m.ID, err) + } + rep.Object = &obj + } + reps[i] = rep + } + + *ro = reps + return nil +} + +// UpdateTime return update time if it exists and 0 otherwise +func (r Replica) UpdateTime() int64 { + if r.Object != nil { + return r.Object.LastUpdateTimeUnix() + } + return r.LastUpdateTimeUnixMilli +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/replication_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/replication_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2f365594d1a6efbf55226b3bfc4badce58b5d876 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/replication_test.go @@ -0,0 +1,368 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. 
+// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/replica" + + "github.com/weaviate/weaviate/usecases/objects" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/schema/crossref" + "github.com/weaviate/weaviate/entities/storobj" +) + +func Test_VObject_MarshalBinary(t *testing.T) { + now := time.Now() + tests := []struct { + name string + vector []float32 + vectors models.Vectors + }{ + { + name: "with vector", + vector: []float32{1, 2, 3, 4, 5}, + }, + { + name: "with vectors", + vectors: models.Vectors{ + "vec1": []float32{0.1, 0.2, 0.3, 0.4}, + "vec2": []float32{1, 2}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := models.Object{ + ID: strfmt.UUID("c6f85bf5-c3b7-4c1d-bd51-e899f9605336"), + Class: "SomeClass", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": map[string]interface{}{ + "beacon": "weaviate://localhost/OtherClass/c82d011c-f05a-43de-8a8a-ee9c814d4cfb", + }, + }, + Vector: tt.vector, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + } + if tt.vectors != nil { + obj.Vectors = tt.vectors + } + + t.Run("when object is present", func(t *testing.T) { + expected := objects.VObject{ + LatestObject: &obj, + StaleUpdateTime: now.UnixMilli(), + Version: 1, + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received objects.VObject + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.EqualValues(t, expected, received) + }) + + t.Run("when object is present", func(t *testing.T) { + expected := 
objects.VObject{ + LatestObject: &obj, + StaleUpdateTime: now.UnixMilli(), + Version: 1, + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received objects.VObject + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.EqualValues(t, expected, received) + }) + + t.Run("when object is nil", func(t *testing.T) { + expected := objects.VObject{ + LatestObject: nil, + StaleUpdateTime: now.UnixMilli(), + Version: 1, + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received objects.VObject + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.EqualValues(t, expected, received) + }) + }) + } +} + +func Test_Replica_MarshalBinary(t *testing.T) { + now := time.Now() + id := strfmt.UUID("c6f85bf5-c3b7-4c1d-bd51-e899f9605336") + tests := []struct { + name string + vector []float32 + vectors map[string][]float32 + }{ + { + name: "with vector", + vector: []float32{1, 2, 3, 4, 5}, + }, + { + name: "with vectors", + vectors: map[string][]float32{ + "vec1": {0.1, 0.2, 0.3, 0.4}, + "vec2": {1, 2}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: id, + Class: "SomeClass", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": models.MultipleRef{ + crossref.NewLocalhost("OtherClass", id). 
+ SingleRef(), + }, + }, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + }, + } + if tt.vector != nil { + obj.Vector = tt.vector + obj.VectorLen = len(tt.vector) + } + if tt.vectors != nil { + obj.Vector = []float32{} + obj.Vectors = tt.vectors + } + + t.Run("when object is present", func(t *testing.T) { + expected := replica.Replica{ + Object: &obj, + ID: obj.ID(), + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received replica.Replica + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.EqualValues(t, expected.Object, received.Object) + assert.EqualValues(t, expected.ID, received.ID) + assert.EqualValues(t, expected.Deleted, received.Deleted) + }) + + t.Run("when object is nil", func(t *testing.T) { + expected := replica.Replica{ + Object: nil, + ID: obj.ID(), + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received replica.Replica + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.EqualValues(t, expected.Object, received.Object) + assert.EqualValues(t, expected.ID, received.ID) + assert.EqualValues(t, expected.Deleted, received.Deleted) + }) + }) + } +} + +func Test_Replicas_MarshalBinary(t *testing.T) { + now := time.Now() + id1 := strfmt.UUID("c6f85bf5-c3b7-4c1d-bd51-e899f9605336") + id2 := strfmt.UUID("88750a99-a72d-46c2-a582-89f02654391d") + tests := []struct { + name string + vec1, vec2 []float32 + vectors1, vectors2 map[string][]float32 + }{ + { + name: "with vector", + vec1: []float32{1, 2, 3, 4, 5}, + vec2: []float32{10, 20, 30, 40, 50}, + }, + { + name: "with vectors", + vectors1: map[string][]float32{ + "vec1": {0.1, 0.2, 0.3, 0.4}, + "vec2": {1, 2}, + }, + vectors2: map[string][]float32{ + "vec1": {0.11, 0.22, 0.33, 0.44}, + "vec2": {11, 22}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj1 := storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: id1, + Class: "SomeClass", + 
CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": models.MultipleRef{ + crossref.NewLocalhost("OtherClass", id1). + SingleRef(), + }, + }, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + }, + Vector: []float32{}, + } + if tt.vec1 != nil { + obj1.Vector = tt.vec1 + obj1.VectorLen = len(tt.vec1) + } + if tt.vectors1 != nil { + obj1.Vector = []float32{} + obj1.Vectors = tt.vectors1 + } + + obj2 := storobj.Object{ + MarshallerVersion: 1, + Object: models.Object{ + ID: id2, + Class: "SomeClass", + CreationTimeUnix: now.UnixMilli(), + LastUpdateTimeUnix: now.Add(time.Hour).UnixMilli(), // time-traveling ;) + Properties: map[string]interface{}{ + "propA": "this is prop A", + "propB": "this is prop B", + "someDate": now.Format(time.RFC3339Nano), + "aNumber": 1e+06, + "crossRef": models.MultipleRef{ + crossref.NewLocalhost("OtherClass", id2). 
+ SingleRef(), + }, + }, + Additional: map[string]interface{}{ + "score": 0.055465422484, + }, + }, + } + if tt.vec2 != nil { + obj2.Vector = tt.vec2 + obj2.VectorLen = len(tt.vec2) + } + if tt.vectors2 != nil { + obj2.Vector = []float32{} + obj2.Vectors = tt.vectors2 + } + + t.Run("when objects are present", func(t *testing.T) { + expected := replica.Replicas{ + { + Object: &obj1, + ID: obj1.ID(), + }, + { + Object: &obj2, + ID: obj2.ID(), + }, + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received replica.Replicas + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.Len(t, received, 2) + assert.EqualValues(t, expected[0].Object, received[0].Object) + assert.EqualValues(t, expected[0].ID, received[0].ID) + assert.EqualValues(t, expected[0].Deleted, received[0].Deleted) + assert.EqualValues(t, expected[1].Object, received[1].Object) + assert.EqualValues(t, expected[1].ID, received[1].ID) + assert.EqualValues(t, expected[1].Deleted, received[1].Deleted) + }) + + t.Run("when there is a nil object", func(t *testing.T) { + expected := replica.Replicas{ + { + Object: &obj1, + ID: obj1.ID(), + }, + { + Object: nil, + ID: obj2.ID(), + }, + } + + b, err := expected.MarshalBinary() + require.Nil(t, err) + + var received replica.Replicas + err = received.UnmarshalBinary(b) + require.Nil(t, err) + + assert.Len(t, received, 2) + assert.EqualValues(t, expected[0].Object, received[0].Object) + assert.EqualValues(t, expected[0].ID, received[0].ID) + assert.EqualValues(t, expected[0].Deleted, received[0].Deleted) + assert.EqualValues(t, expected[1].Object, received[1].Object) + assert.EqualValues(t, expected[1].ID, received[1].ID) + assert.EqualValues(t, expected[1].Deleted, received[1].Deleted) + }) + }) + } +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/replicator.go b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator.go new file mode 100644 index 
0000000000000000000000000000000000000000..d74db0612515b8bd1c5801d4b56c0e161da1bcdc --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator.go @@ -0,0 +1,346 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/storobj" + "github.com/weaviate/weaviate/usecases/objects" +) + +// opID operation encode as and int +type opID int + +const ( + opPutObject opID = iota + 1 + opMergeObject + opDeleteObject + + opPutObjects = iota + 97 + opAddReferences + opDeleteObjects +) + +type ( + router interface { + BuildRoutingPlanOptions(tenant, shard string, cl types.ConsistencyLevel, directCandidate string) types.RoutingPlanBuildOptions + BuildReadRoutingPlan(params types.RoutingPlanBuildOptions) (types.ReadRoutingPlan, error) + BuildWriteRoutingPlan(params types.RoutingPlanBuildOptions) (types.WriteRoutingPlan, error) + NodeHostname(nodeName string) (string, bool) + AllHostnames() []string + } + + // _Result represents a valid value or an error ( _ prevent make it public). 
+ _Result[T any] struct { + Value T + Err error + } +) + +type Replicator struct { + class string + nodeName string + router router + client Client + log logrus.FieldLogger + requestCounter atomic.Uint64 + stream replicatorStream + *Finder +} + +func NewReplicator(className string, + router router, + nodeName string, + getDeletionStrategy func() string, + client Client, + l logrus.FieldLogger, +) *Replicator { + return &Replicator{ + class: className, + nodeName: nodeName, + router: router, + client: client, + log: l, + Finder: NewFinder( + className, + router, + nodeName, + client, + l, + defaultPullBackOffInitialInterval, + defaultPullBackOffMaxElapsedTime, + getDeletionStrategy, + ), + } +} + +func (r *Replicator) AllHostnames() []string { + return r.router.AllHostnames() +} + +func (r *Replicator) PutObject(ctx context.Context, + shard string, + obj *storobj.Object, + l types.ConsistencyLevel, + schemaVersion uint64, +) error { + coord := newCoordinator[SimpleResponse](r, shard, r.requestID(opPutObject), r.log) + isReady := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.PutObject(ctx, host, r.class, shard, requestID, obj, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + replyCh, level, err := coord.Push(ctx, l, isReady, r.simpleCommit(shard)) + if err != nil { + r.log.WithField("op", "push.one").WithField("class", r.class). + WithField("shard", shard).Error(err) + return fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + + } + err = r.stream.readErrors(1, level, replyCh)[0] + if err != nil { + r.log.WithField("op", "put").WithField("class", r.class). 
+ WithField("shard", shard).WithField("uuid", obj.ID()).Error(err) + } + return err +} + +func (r *Replicator) MergeObject(ctx context.Context, + shard string, + doc *objects.MergeDocument, + l types.ConsistencyLevel, + schemaVersion uint64, +) error { + coord := newCoordinator[SimpleResponse](r, shard, r.requestID(opMergeObject), r.log) + op := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.MergeObject(ctx, host, r.class, shard, requestID, doc, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + replyCh, level, err := coord.Push(ctx, l, op, r.simpleCommit(shard)) + if err != nil { + r.log.WithField("op", "push.merge").WithField("class", r.class). + WithField("shard", shard).Error(err) + return fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + } + err = r.stream.readErrors(1, level, replyCh)[0] + if err != nil { + r.log.WithField("op", "merge").WithField("class", r.class). 
+ WithField("shard", shard).WithField("uuid", doc.ID).Error(err) + var replicaErr *Error + if errors.As(err, &replicaErr) && replicaErr != nil && replicaErr.Code == StatusObjectNotFound { + return objects.NewErrDirtyWriteOfDeletedObject(replicaErr) + } + } + return err +} + +func (r *Replicator) DeleteObject(ctx context.Context, + shard string, + id strfmt.UUID, + deletionTime time.Time, + l types.ConsistencyLevel, + schemaVersion uint64, +) error { + coord := newCoordinator[SimpleResponse](r, shard, r.requestID(opDeleteObject), r.log) + op := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.DeleteObject(ctx, host, r.class, shard, requestID, id, deletionTime, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + replyCh, level, err := coord.Push(ctx, l, op, r.simpleCommit(shard)) + if err != nil { + r.log.WithField("op", "push.delete").WithField("class", r.class). + WithField("shard", shard).Error(err) + return fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + } + err = r.stream.readErrors(1, level, replyCh)[0] + if err != nil { + r.log.WithField("op", "delete").WithField("class", r.class). 
+ WithField("shard", shard).WithField("uuid", id).Error(err) + } + return err +} + +func (r *Replicator) PutObjects(ctx context.Context, + shard string, + objs []*storobj.Object, + l types.ConsistencyLevel, + schemaVersion uint64, +) []error { + coord := newCoordinator[SimpleResponse](r, shard, r.requestID(opPutObjects), r.log) + op := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.PutObjects(ctx, host, r.class, shard, requestID, objs, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + + replyCh, level, err := coord.Push(ctx, l, op, r.simpleCommit(shard)) + if err != nil { + r.log.WithField("op", "push.many").WithField("class", r.class). + WithField("shard", shard).Error(err) + err = fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + errs := make([]error, len(objs)) + for i := 0; i < len(objs); i++ { + errs[i] = err + } + return errs + } + errs := r.stream.readErrors(len(objs), level, replyCh) + if err := firstError(errs); err != nil { + r.log.WithField("op", "put.many").WithField("class", r.class). 
+ WithField("shard", shard).Error(errs) + } + return errs +} + +func (r *Replicator) DeleteObjects(ctx context.Context, + shard string, + uuids []strfmt.UUID, + deletionTime time.Time, + dryRun bool, + l types.ConsistencyLevel, + schemaVersion uint64, +) []objects.BatchSimpleObject { + coord := newCoordinator[DeleteBatchResponse](r, shard, r.requestID(opDeleteObjects), r.log) + op := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.DeleteObjects(ctx, host, r.class, shard, requestID, uuids, deletionTime, dryRun, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + commit := func(ctx context.Context, host, requestID string) (DeleteBatchResponse, error) { + resp := DeleteBatchResponse{} + err := r.client.Commit(ctx, host, r.class, shard, requestID, &resp) + if err == nil { + err = resp.FirstError() + } + if err != nil { + err = fmt.Errorf("%q: %w", host, err) + } + return resp, err + } + + replyCh, level, err := coord.Push(ctx, l, op, commit) + if err != nil { + r.log.WithField("op", "push.deletes").WithField("class", r.class). + WithField("shard", shard).Error(err) + err = fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + errs := make([]objects.BatchSimpleObject, len(uuids)) + for i := 0; i < len(uuids); i++ { + errs[i].Err = err + } + return errs + } + rs := r.stream.readDeletions(len(uuids), level, replyCh) + if err := firstBatchError(rs); err != nil { + r.log.WithField("op", "put.deletes").WithField("class", r.class). 
+ WithField("shard", shard).Error(rs) + } + return rs +} + +func (r *Replicator) AddReferences(ctx context.Context, + shard string, + refs []objects.BatchReference, + l types.ConsistencyLevel, + schemaVersion uint64, +) []error { + coord := newCoordinator[SimpleResponse](r, shard, r.requestID(opAddReferences), r.log) + op := func(ctx context.Context, host, requestID string) error { + resp, err := r.client.AddReferences(ctx, host, r.class, shard, requestID, refs, schemaVersion) + if err == nil { + err = resp.FirstError() + } + if err != nil { + return fmt.Errorf("%q: %w", host, err) + } + return nil + } + replyCh, level, err := coord.Push(ctx, l, op, r.simpleCommit(shard)) + if err != nil { + r.log.WithField("op", "push.refs").WithField("class", r.class). + WithField("shard", shard).Error(err) + err = fmt.Errorf("%s %q: %w", MsgCLevel, l, ErrReplicas) + errs := make([]error, len(refs)) + for i := 0; i < len(refs); i++ { + errs[i] = err + } + return errs + } + errs := r.stream.readErrors(len(refs), level, replyCh) + if err := firstError(errs); err != nil { + r.log.WithField("op", "put.refs").WithField("class", r.class). + WithField("shard", shard).Error(errs) + } + return errs +} + +// simpleCommit generate commit function for the coordinator +func (r *Replicator) simpleCommit(shard string) commitOp[SimpleResponse] { + return func(ctx context.Context, host, requestID string) (SimpleResponse, error) { + resp := SimpleResponse{} + err := r.client.Commit(ctx, host, r.class, shard, requestID, &resp) + if err == nil { + err = resp.FirstError() + } + if err != nil { + err = fmt.Errorf("%s: %w", host, err) + } + return resp, err + } +} + +// requestID returns ID as [CoordinatorName-OpCode-TimeStamp-Counter]. +// The coordinator uses it to uniquely identify a transaction. +// ID makes the request observable in the cluster by specifying its origin +// and the kind of replication request. 
+func (r *Replicator) requestID(op opID) string { + return fmt.Sprintf("%s-%.2x-%x-%x", + r.nodeName, + op, + time.Now().UnixMilli(), + r.requestCounter.Add(1)) +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_stream.go b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_stream.go new file mode 100644 index 0000000000000000000000000000000000000000..5a466c034b6d70d8a06dd095eec22213a2866e46 --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_stream.go @@ -0,0 +1,160 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica + +import ( + "fmt" + + "github.com/go-openapi/strfmt" + "github.com/weaviate/weaviate/usecases/objects" +) + +type ( + // replicatorStream represents an incoming stream of responses + // to replication requests sent to replicas + replicatorStream struct{} +) + +// readErrors reads errors from incoming responses. +// It returns as soon as the specified consistency level l has been reached +func (r replicatorStream) readErrors(batchSize int, + level int, + ch <-chan _Result[SimpleResponse], +) []error { + urs := make([]SimpleResponse, 0, level) + var firstError error + for x := range ch { + if x.Err != nil { + urs = append(urs, x.Value) + if len(x.Value.Errors) == 0 && firstError == nil { + firstError = x.Err + } + } else { + level-- + if level == 0 { // consistency level reached + return make([]error, batchSize) + } + } + } + if level > 0 && firstError == nil { + firstError = fmt.Errorf("broadcast: %w", ErrReplicas) + } + return r.flattenErrors(batchSize, urs, firstError) +} + +// readDeletions reads deletion results from incoming responses. 
+// It returns as soon as the specified consistency level l has been reached +func (r replicatorStream) readDeletions(batchSize int, + level int, + ch <-chan _Result[DeleteBatchResponse], +) []objects.BatchSimpleObject { + rs := make([]DeleteBatchResponse, 0, level) + urs := make([]DeleteBatchResponse, 0, level) + var firstError error + for x := range ch { + if x.Err != nil { + urs = append(urs, x.Value) + if len(x.Value.Batch) == 0 && firstError == nil { + firstError = x.Err + } + } else { + level-- + rs = append(rs, x.Value) + if level == 0 { // consistency level reached + return r.flattenDeletions(batchSize, rs, nil) + } + } + } + if level > 0 && firstError == nil { + firstError = fmt.Errorf("broadcast: %w", ErrReplicas) + } + urs = append(urs, rs...) + return r.flattenDeletions(batchSize, urs, firstError) +} + +// flattenErrors extracts errors from responses + +func (replicatorStream) flattenErrors(batchSize int, + rs []SimpleResponse, + defaultErr error, +) []error { + errs := make([]error, batchSize) + n := 0 + for _, resp := range rs { + if len(resp.Errors) != batchSize { + continue + } + n++ + for i, err := range resp.Errors { + if !err.Empty() && errs[i] == nil { + errs[i] = err.Clone() + } + } + } + if n == 0 || n != len(rs) { + for i := range errs { + if errs[i] == nil { + errs[i] = defaultErr + } + } + } + return errs +} + +// flattenDeletions extracts deletion results from responses +func (replicatorStream) flattenDeletions(batchSize int, + rs []DeleteBatchResponse, + defaultErr error, +) []objects.BatchSimpleObject { + ret := make([]objects.BatchSimpleObject, batchSize) + n := 0 + for _, resp := range rs { + if len(resp.Batch) != batchSize { + continue + } + n++ + for i, x := range resp.Batch { + if !x.Error.Empty() && ret[i].Err == nil { + ret[i].Err = x.Error.Clone() + } + if ret[i].UUID == "" && x.UUID != "" { + ret[i].UUID = strfmt.UUID(x.UUID) + } + } + } + if n == 0 || n != len(rs) { + for i := range ret { + if ret[i].Err == nil { + ret[i].Err = 
defaultErr + } + } + } + return ret +} + +func firstError(es []error) error { + for _, e := range es { + if e != nil { + return e + } + } + return nil +} + +func firstBatchError(xs []objects.BatchSimpleObject) error { + for _, x := range xs { + if x.Err != nil { + return x.Err + } + } + return nil +} diff --git a/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_test.go b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b6fbdd0dfe6649477e24469b61979afee9786eea --- /dev/null +++ b/platform/dbops/binaries/weaviate-src/usecases/replica/replicator_test.go @@ -0,0 +1,993 @@ +// _ _ +// __ _____ __ ___ ___ __ _| |_ ___ +// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \ +// \ V V / __/ (_| |\ V /| | (_| | || __/ +// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___| +// +// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved. +// +// CONTACT: hello@weaviate.io +// + +package replica_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/weaviate/weaviate/usecases/schema" + + "github.com/weaviate/weaviate/usecases/replica" + "github.com/weaviate/weaviate/usecases/sharding" + "github.com/weaviate/weaviate/usecases/sharding/config" + + "github.com/go-openapi/strfmt" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + replicationTypes "github.com/weaviate/weaviate/cluster/replication/types" + clusterRouter "github.com/weaviate/weaviate/cluster/router" + "github.com/weaviate/weaviate/cluster/router/types" + "github.com/weaviate/weaviate/entities/models" + "github.com/weaviate/weaviate/entities/storobj" + clusterMocks "github.com/weaviate/weaviate/usecases/cluster/mocks" + "github.com/weaviate/weaviate/usecases/objects" + "golang.org/x/exp/slices" +) + +var ( + anyVal = mock.Anything + errAny = errors.New("any error") +) + +func 
TestReplicatorReplicaNotFound(t *testing.T) { + ctx := context.Background() + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("PutObject_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + err := rep.PutObject(ctx, "S", nil, types.ConsistencyLevelAll, 0) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("MergeObject_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + err := rep.MergeObject(ctx, "S", nil, types.ConsistencyLevelAll, 0) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("DeleteObject_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + err := rep.DeleteObject(ctx, "S", "id", time.Now(), types.ConsistencyLevelAll, 0) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("PutObjects_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + errs := rep.PutObjects(ctx, "S", []*storobj.Object{{}, {}}, types.ConsistencyLevelAll, 0) + assert.Equal(t, 2, len(errs)) + for _, err := range errs { + assert.ErrorIs(t, err, replica.ErrReplicas) + } + }) + + t.Run(fmt.Sprintf("DeleteObjects_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + xs := rep.DeleteObjects(ctx, "S", []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2"), strfmt.UUID("3")}, time.Now(), false, types.ConsistencyLevelAll, 0) + assert.Equal(t, 3, len(xs)) + for _, x := range xs { + assert.ErrorIs(t, x.Err, replica.ErrReplicas) + } + }) + + t.Run(fmt.Sprintf("AddReferences_%v", tc.variant), func(t *testing.T) { + f 
:= newFakeFactory(t, "C1", "S", []string{}, tc.isMultiTenant) + rep := f.newReplicator() + errs := rep.AddReferences(ctx, "S", []objects.BatchReference{{}, {}}, types.ConsistencyLevelAll, 0) + assert.Equal(t, 2, len(errs)) + for _, err := range errs { + assert.ErrorIs(t, err, replica.ErrReplicas) + } + }) + } +} + +func TestReplicatorPutObject(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + obj = &storobj.Object{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelAll_%v", tc.variant), func(t *testing.T) { + nodes := []string{"A", "B", "C"} + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes { + f.WClient.On("PutObject", mock.Anything, n, cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil) + } + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelAll, 123) + assert.Nil(t, err) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelOne_%v", tc.variant), func(t *testing.T) { + nodes := []string{"A", "B", "C"} + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + + f.WClient.On("PutObject", mock.Anything, "A", cls, shard, anyVal, obj, uint64(123)).Return(resp, errAny).After(time.Second * 10) + f.WClient.On("Abort", mock.Anything, "A", cls, shard, anyVal).Return(resp, nil) + + f.WClient.On("PutObject", mock.Anything, "B", cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, "B", cls, shard, anyVal, anyVal).Return(errAny) + + f.WClient.On("PutObject", mock.Anything, "C", cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, "C", 
cls, shard, anyVal, anyVal).Return(nil) + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelOne, 123) + assert.Nil(t, err) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelQuorum_%v", tc.variant), func(t *testing.T) { + nodes := []string{"A", "B", "C"} + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes[:2] { + f.WClient.On("PutObject", mock.Anything, n, cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil) + } + f.WClient.On("PutObject", mock.Anything, "C", cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, "C", cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: []replica.Error{{Msg: "e3"}}} + } + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelQuorum, 123) + assert.Nil(t, err) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelQuorumDifferentSourceNode_%v", tc.variant), func(t *testing.T) { + nodesSrc := []string{"A", "B", "C"} + for _, sourceNode := range nodesSrc { + sourceNode := sourceNode + nodesCopy := make([]string, len(nodesSrc)) + copy(nodesCopy, nodesSrc) + t.Run(fmt.Sprintf("WithSourceNode=%s", sourceNode), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodesCopy, tc.isMultiTenant) + rep := f.newReplicatorWithSourceNode(sourceNode) + resp := replica.SimpleResponse{} + for _, n := range nodesCopy { + if n == sourceNode { + continue + } + f.WClient.On("PutObject", mock.Anything, n, cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil) + } + + // Craft a custom shard2replicas to emulate RF changing + // We always remove the source node from the replica set. 
This allows us to test the direct candidate logic when + // the direct candidate isn't part of the set of replica + f.Shard2replicas[shard] = slices.DeleteFunc(nodesCopy, func(n string) bool { return n == sourceNode }) + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelQuorum, 123) + assert.Nil(t, err) + + f.WClient.AssertExpectations(t) + }) + } + }) + + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("PutObject", mock.Anything, nodes[0], cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + f.WClient.On("PutObject", mock.Anything, nodes[1], cls, shard, anyVal, obj, uint64(123)).Return(resp, errAny) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp, nil) + + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("PhaseOneUnsuccessfulResponse_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("PutObject", mock.Anything, nodes[0], cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + resp2 := replica.SimpleResponse{[]replica.Error{{Err: errAny}}} + f.WClient.On("PutObject", mock.Anything, nodes[1], cls, shard, anyVal, obj, uint64(123)).Return(resp2, nil) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp, nil) + + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("Commit_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", 
shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes { + f.WClient.On("PutObject", mock.Anything, n, cls, shard, anyVal, obj, uint64(123)).Return(resp, nil) + } + f.WClient.On("Commit", ctx, nodes[0], "C1", shard, anyVal, anyVal).Return(nil) + f.WClient.On("Commit", ctx, nodes[1], "C1", shard, anyVal, anyVal).Return(errAny) + + err := rep.PutObject(ctx, shard, obj, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, errAny) + }) + } +} + +func TestReplicatorMergeObject(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + merge = &objects.MergeDocument{} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes { + f.WClient.On("MergeObject", mock.Anything, n, cls, shard, anyVal, merge, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil) + } + err := rep.MergeObject(ctx, shard, merge, types.ConsistencyLevelAll, 123) + assert.Nil(t, err) + }) + + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("MergeObject", mock.Anything, nodes[0], cls, shard, anyVal, merge, uint64(123)).Return(resp, nil) + f.WClient.On("MergeObject", mock.Anything, nodes[1], cls, shard, anyVal, merge, uint64(123)).Return(resp, errAny) + f.WClient.On("Abort", mock.Anything, nodes[0], cls, shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], cls, shard, anyVal).Return(resp, nil) + + err := 
rep.MergeObject(ctx, shard, merge, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("PhaseOneUnsuccessfulResponse_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("MergeObject", mock.Anything, nodes[0], cls, shard, anyVal, merge, uint64(123)).Return(resp, nil) + resp2 := replica.SimpleResponse{[]replica.Error{{Err: errAny}}} + f.WClient.On("MergeObject", mock.Anything, nodes[1], cls, shard, anyVal, merge, uint64(123)).Return(resp2, nil) + f.WClient.On("Abort", mock.Anything, nodes[0], cls, shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], cls, shard, anyVal).Return(resp, nil) + + err := rep.MergeObject(ctx, shard, merge, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("Commit_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes { + f.WClient.On("MergeObject", mock.Anything, n, cls, shard, anyVal, merge, uint64(123)).Return(resp, nil) + } + f.WClient.On("Commit", ctx, nodes[0], cls, shard, anyVal, anyVal).Return(nil) + f.WClient.On("Commit", ctx, nodes[1], cls, shard, anyVal, anyVal).Return(errAny) + + err := rep.MergeObject(ctx, shard, merge, types.ConsistencyLevelAll, 123) + assert.ErrorIs(t, err, errAny) + }) + } +} + +func TestReplicatorDeleteObject(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B", "C"} + uuid = strfmt.UUID("1234") + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + factory := 
newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + resp := replica.SimpleResponse{Errors: make([]replica.Error, 1)} + for _, n := range nodes[:2] { + client.On("DeleteObject", mock.Anything, n, cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil) + } + client.On("DeleteObject", mock.Anything, "C", cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(replica.SimpleResponse{}, errAny) + for _, n := range nodes { + client.On("Abort", mock.Anything, n, "C1", shard, anyVal).Return(resp, nil) + } + + err := rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelAll, 123) + assert.NotNil(t, err) + assert.ErrorIs(t, err, replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelAll_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + resp := replica.SimpleResponse{Errors: make([]replica.Error, 1)} + for _, n := range nodes { + client.On("DeleteObject", mock.Anything, n, cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil) + } + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelAll, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelQuorum, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelOne, 123)) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyQuorum_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + resp := replica.SimpleResponse{Errors: make([]replica.Error, 1)} + for _, n := range nodes[:2] { + client.On("DeleteObject", mock.Anything, n, cls, shard, anyVal, 
uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{ + Errors: []replica.Error{{}}, + } + } + } + client.On("DeleteObject", mock.Anything, "C", cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, "C", "C1", shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{ + Errors: []replica.Error{{Msg: "e3"}}, + } + } + + assert.NotNil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelAll, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelQuorum, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelOne, 123)) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyQuorum2_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + resp := replica.SimpleResponse{Errors: make([]replica.Error, 1)} + for _, n := range nodes[:2] { + client.On("DeleteObject", mock.Anything, n, cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, n, "C1", shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{ + Errors: []replica.Error{{}}, + } + } + } + client.On("DeleteObject", mock.Anything, "C", cls, shard, anyVal, uuid, anyVal, uint64(123)).Return(resp, nil) + client.On("Commit", ctx, "C", "C1", shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{ + Errors: []replica.Error{{Msg: "e3"}}, + } + } + + assert.NotNil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), 
types.ConsistencyLevelAll, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelQuorum, 123)) + assert.Nil(t, rep.DeleteObject(ctx, shard, uuid, time.Now(), types.ConsistencyLevelOne, 123)) + }) + } +} + +func TestReplicatorDeleteObjects(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + client.On("DeleteObjects", mock.Anything, nodes[0], cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(replica.SimpleResponse{}, nil) + client.On("DeleteObjects", mock.Anything, nodes[1], cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(replica.SimpleResponse{}, errAny) + for _, n := range nodes { + client.On("Abort", mock.Anything, n, "C1", shard, anyVal).Return(replica.SimpleResponse{}, nil) + } + result := factory.newReplicator().DeleteObjects(ctx, shard, docIDs, time.Now(), false, types.ConsistencyLevelAll, 123) + assert.Equal(t, len(result), 2) + for _, r := range result { + assert.ErrorIs(t, r.Err, replica.ErrReplicas) + } + }) + + t.Run(fmt.Sprintf("PhaseTwoDecodingError_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + for _, n := range nodes { + client.On("DeleteObjects", mock.Anything, n, cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(replica.SimpleResponse{}, nil) + client.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(errAny) + } + result := 
factory.newReplicator().DeleteObjects(ctx, shard, docIDs, time.Now(), false, types.ConsistencyLevelAll, 123) + assert.Equal(t, len(result), 2) + }) + + t.Run(fmt.Sprintf("PartialSuccess_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + resp1 := replica.SimpleResponse{} + for _, n := range nodes { + client.On("DeleteObjects", mock.Anything, n, cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(resp1, nil) + client.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil).RunFn = func(args mock.Arguments) { + resp := args[5].(*replica.DeleteBatchResponse) + *resp = replica.DeleteBatchResponse{ + Batch: []replica.UUID2Error{{"1", replica.Error{}}, {"2", replica.Error{Msg: "e1"}}}, + } + } + } + result := rep.DeleteObjects(ctx, shard, docIDs, time.Now(), false, types.ConsistencyLevelAll, 123) + assert.Equal(t, len(result), 2) + assert.Equal(t, objects.BatchSimpleObject{UUID: "1", Err: nil}, result[0]) + assert.Equal(t, objects.BatchSimpleObject{UUID: "2", Err: &replica.Error{Msg: "e1"}}, result[1]) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelAll_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + resp1 := replica.SimpleResponse{} + for _, n := range nodes { + client.On("DeleteObjects", mock.Anything, n, cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(resp1, nil) + client.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil).RunFn = func(args mock.Arguments) { + resp := args[5].(*replica.DeleteBatchResponse) + *resp = replica.DeleteBatchResponse{ + Batch: []replica.UUID2Error{{UUID: "1"}, {UUID: "2"}}, + } + } + } + result := rep.DeleteObjects(ctx, shard, docIDs, 
time.Now(), false, types.ConsistencyLevelAll, 123) + assert.Equal(t, len(result), 2) + assert.Equal(t, objects.BatchSimpleObject{UUID: "1", Err: nil}, result[0]) + assert.Equal(t, objects.BatchSimpleObject{UUID: "2", Err: nil}, result[1]) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelOne_%v", tc.variant), func(t *testing.T) { + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + resp1 := replica.SimpleResponse{} + client.On("DeleteObjects", mock.Anything, nodes[0], cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(resp1, nil) + client.On("DeleteObjects", mock.Anything, nodes[1], cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(resp1, errAny) + client.On("Commit", ctx, nodes[0], cls, shard, anyVal, anyVal).Return(nil).RunFn = func(args mock.Arguments) { + resp := args[5].(*replica.DeleteBatchResponse) + *resp = replica.DeleteBatchResponse{ + Batch: []replica.UUID2Error{{UUID: "1"}, {UUID: "2"}}, + } + } + result := rep.DeleteObjects(ctx, shard, docIDs, time.Now(), false, types.ConsistencyLevelOne, 123) + assert.Equal(t, len(result), 2) + assert.Equal(t, []objects.BatchSimpleObject{{UUID: "1"}, {UUID: "2"}}, result) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyQuorum_%v", tc.variant), func(t *testing.T) { + nodes = []string{"A", "B", "C"} + factory := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + client := factory.WClient + rep := factory.newReplicator() + docIDs := []strfmt.UUID{strfmt.UUID("1"), strfmt.UUID("2")} + resp1 := replica.SimpleResponse{} + for _, n := range nodes { + client.On("DeleteObjects", mock.Anything, n, cls, shard, anyVal, docIDs, anyVal, false, uint64(123)).Return(resp1, nil) + } + for _, n := range nodes[:2] { + client.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil).RunFn = func(args mock.Arguments) { + resp := 
args[5].(*replica.DeleteBatchResponse) + *resp = replica.DeleteBatchResponse{ + Batch: []replica.UUID2Error{{UUID: "1"}, {UUID: "2"}}, + } + } + } + client.On("Commit", ctx, "C", cls, shard, anyVal, anyVal).Return(nil).RunFn = func(args mock.Arguments) { + resp := args[5].(*replica.DeleteBatchResponse) + *resp = replica.DeleteBatchResponse{ + Batch: []replica.UUID2Error{{UUID: "1"}, {UUID: "2", Error: replica.Error{Msg: "e2"}}}, + } + } + result := rep.DeleteObjects(ctx, shard, docIDs, time.Now(), false, types.ConsistencyLevelQuorum, 123) + assert.Equal(t, len(result), 2) + assert.Equal(t, []objects.BatchSimpleObject{{UUID: "1"}, {UUID: "2"}}, result) + }) + } +} + +func TestReplicatorPutObjects(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + objs = []*storobj.Object{{}, {}, {}} + resp1 = replica.SimpleResponse{[]replica.Error{{}}} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelAll_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{Errors: make([]replica.Error, 3)} + for _, n := range nodes { + f.WClient.On("PutObjects", mock.Anything, n, cls, shard, anyVal, objs, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil) + } + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelAll, 123) + assert.Equal(t, []error{nil, nil, nil}, errs) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelOne_%v", tc.variant), func(t *testing.T) { + nodes := []string{"A", "B", "C"} + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + for _, n := range nodes[:2] { + f.WClient.On("PutObjects", mock.Anything, n, cls, shard, anyVal, objs, 
uint64(0)).Return(resp1, nil) + f.WClient.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: []replica.Error{{}, {}, {Msg: "e3"}}} + } + } + f.WClient.On("PutObjects", mock.Anything, "C", cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + f.WClient.On("Commit", ctx, "C", cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: make([]replica.Error, 3)} + } + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelOne, 0) + assert.Equal(t, []error{nil, nil, nil}, errs) + }) + + t.Run(fmt.Sprintf("SuccessWithConsistencyLevelQuorum_%v", tc.variant), func(t *testing.T) { + nodes := []string{"A", "B", "C"} + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + for _, n := range nodes[:2] { + f.WClient.On("PutObjects", mock.Anything, n, cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + f.WClient.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: []replica.Error{{}}} + } + } + f.WClient.On("PutObjects", mock.Anything, "C", cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + f.WClient.On("Commit", ctx, "C", cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: []replica.Error{{Msg: "e3"}}} + } + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelQuorum, 0) + assert.Equal(t, []error{nil, nil, nil}, errs) + }) + + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + f.WClient.On("PutObjects", mock.Anything, nodes[0], cls, shard, anyVal, 
objs, uint64(0)).Return(resp1, nil) + f.WClient.On("PutObjects", mock.Anything, nodes[1], cls, shard, anyVal, objs, uint64(0)).Return(resp1, errAny) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp1, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp1, nil) + + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelAll, 0) + assert.Equal(t, 3, len(errs)) + assert.ErrorIs(t, errs[0], replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("PhaseOneUnsuccessfulResponse_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + f.WClient.On("PutObjects", mock.Anything, nodes[0], cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + resp2 := replica.SimpleResponse{[]replica.Error{{Msg: "E1"}, {Msg: "E2"}}} + f.WClient.On("PutObjects", mock.Anything, nodes[1], cls, shard, anyVal, objs, uint64(0)).Return(resp2, nil) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp1, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp1, nil) + + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelAll, 0) + assert.Equal(t, 3, len(errs)) + for _, err := range errs { + assert.ErrorIs(t, err, replica.ErrReplicas) + } + }) + + t.Run(fmt.Sprintf("PhaseTwoDecodingError_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, cls, shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + for _, n := range nodes { + f.WClient.On("PutObjects", mock.Anything, n, cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + } + f.WClient.On("Commit", ctx, nodes[0], cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: make([]replica.Error, 3)} + } + f.WClient.On("Commit", ctx, nodes[1], cls, shard, anyVal, anyVal).Return(errAny) + + errs := rep.PutObjects(ctx, shard, objs, 
types.ConsistencyLevelAll, 0) + assert.Equal(t, len(errs), 3) + assert.ErrorIs(t, errs[0], errAny) + assert.ErrorIs(t, errs[1], errAny) + assert.ErrorIs(t, errs[2], errAny) + }) + + t.Run(fmt.Sprintf("PhaseTwoUnsuccessfulResponse_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, cls, shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + node2Errs := []replica.Error{{Msg: "E1"}, {}, {Msg: "E3"}} + for _, n := range nodes { + f.WClient.On("PutObjects", mock.Anything, n, cls, shard, anyVal, objs, uint64(0)).Return(resp1, nil) + } + f.WClient.On("Commit", ctx, nodes[0], cls, shard, anyVal, anyVal).Return(nil).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: make([]replica.Error, 3)} + } + f.WClient.On("Commit", ctx, nodes[1], cls, shard, anyVal, anyVal).Return(errAny).RunFn = func(a mock.Arguments) { + resp := a[5].(*replica.SimpleResponse) + *resp = replica.SimpleResponse{Errors: node2Errs} + } + + errs := rep.PutObjects(ctx, shard, objs, types.ConsistencyLevelAll, 0) + assert.Equal(t, len(errs), len(objs)) + + wantError := []error{&node2Errs[0], nil, &node2Errs[2]} + assert.Equal(t, wantError, errs) + }) + } +} + +func TestReplicatorAddReferences(t *testing.T) { + var ( + cls = "C1" + shard = "SH1" + nodes = []string{"A", "B"} + ctx = context.Background() + refs = []objects.BatchReference{{}, {}} + ) + + testCases := []struct { + variant string + isMultiTenant bool + }{ + {"MultiTenant", true}, + {"SingleTenant", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Success_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n := range nodes { + f.WClient.On("AddReferences", mock.Anything, n, cls, shard, anyVal, refs, uint64(123)).Return(resp, nil) + f.WClient.On("Commit", ctx, n, cls, shard, anyVal, anyVal).Return(nil) + } + errs := 
rep.AddReferences(ctx, shard, refs, types.ConsistencyLevelAll, 123) + assert.Equal(t, []error{nil, nil}, errs) + }) + + t.Run(fmt.Sprintf("PhaseOneConnectionError_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("AddReferences", mock.Anything, nodes[0], cls, shard, anyVal, refs, uint64(123)).Return(resp, nil) + f.WClient.On("AddReferences", mock.Anything, nodes[1], cls, shard, anyVal, refs, uint64(123)).Return(resp, errAny) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp, nil) + + errs := rep.AddReferences(ctx, shard, refs, types.ConsistencyLevelAll, 123) + assert.Equal(t, 2, len(errs)) + assert.ErrorIs(t, errs[0], replica.ErrReplicas) + }) + + t.Run(fmt.Sprintf("PhaseOneUnsuccessfulResponse_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, "C1", shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + f.WClient.On("AddReferences", mock.Anything, nodes[0], cls, shard, anyVal, refs, uint64(123)).Return(resp, nil) + resp2 := replica.SimpleResponse{[]replica.Error{{Msg: "E1"}, {Msg: "E2"}}} + f.WClient.On("AddReferences", mock.Anything, nodes[1], cls, shard, anyVal, refs, uint64(123)).Return(resp2, nil) + f.WClient.On("Abort", mock.Anything, nodes[0], "C1", shard, anyVal).Return(resp, nil) + f.WClient.On("Abort", mock.Anything, nodes[1], "C1", shard, anyVal).Return(resp, nil) + + errs := rep.AddReferences(ctx, shard, refs, types.ConsistencyLevelAll, 123) + assert.Equal(t, 2, len(errs)) + for _, err := range errs { + assert.ErrorIs(t, err, replica.ErrReplicas) + } + }) + + t.Run(fmt.Sprintf("Commit_%v", tc.variant), func(t *testing.T) { + f := newFakeFactory(t, cls, shard, nodes, tc.isMultiTenant) + rep := f.newReplicator() + resp := replica.SimpleResponse{} + for _, n 
:= range nodes { + f.WClient.On("AddReferences", mock.Anything, n, cls, shard, anyVal, refs, uint64(123)).Return(resp, nil) + } + f.WClient.On("Commit", ctx, nodes[0], cls, shard, anyVal, anyVal).Return(nil) + f.WClient.On("Commit", ctx, nodes[1], cls, shard, anyVal, anyVal).Return(errAny) + + errs := rep.AddReferences(ctx, shard, refs, types.ConsistencyLevelAll, 123) + assert.Equal(t, len(errs), 2) + assert.ErrorIs(t, errs[0], errAny) + assert.ErrorIs(t, errs[1], errAny) + }) + } +} + +type fakeFactory struct { + t *testing.T + CLS string + Nodes []string + Shard2replicas map[string][]string + WClient *fakeClient + RClient *fakeRClient + log *logrus.Logger + hook *test.Hook + isMultiTenant bool +} + +func newFakeFactory(t *testing.T, class, shard string, nodes []string, isMultiTenant bool) *fakeFactory { + logger, hook := test.NewNullLogger() + + return &fakeFactory{ + t: t, + CLS: class, + Nodes: nodes, + Shard2replicas: map[string][]string{shard: nodes}, + WClient: &fakeClient{}, + RClient: &fakeRClient{}, + log: logger, + hook: hook, + isMultiTenant: isMultiTenant, + } +} + +func (f *fakeFactory) AddShard(shard string, nodes []string) { + f.Shard2replicas[shard] = nodes +} + +func (f *fakeFactory) newRouter(thisNode string) types.Router { + nodes := make([]string, 0, len(f.Nodes)) + for _, n := range f.Nodes { + if n == thisNode { + nodes = slices.Insert(nodes, 0, thisNode) + } else { + nodes = append(nodes, n) + } + } + clusterState := clusterMocks.NewMockNodeSelector(nodes...) 
	// Schema getter: report every queried tenant as HOT so tenant-status
	// checks never filter a shard out.
	schemaGetterMock := schema.NewMockSchemaGetter(f.t)
	schemaGetterMock.EXPECT().OptimisticTenantStatus(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
		func(ctx context.Context, class string, tenant string) (map[string]string, error) {
			return map[string]string{
				tenant: models.TenantActivityStatusHOT,
			}, nil
		}).Maybe()

	// Schema reader: answer shard listing, class reads, and replica lookups
	// from the factory's static Shard2replicas mapping.
	schemaReaderMock := schema.NewMockSchemaReader(f.t)
	schemaReaderMock.EXPECT().Shards(mock.Anything).RunAndReturn(func(className string) ([]string, error) {
		shards := make([]string, 0, len(f.Shard2replicas))
		for shard := range f.Shard2replicas {
			shards = append(shards, shard)
		}
		return shards, nil
	}).Maybe()

	schemaReaderMock.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error {
		class := &models.Class{Class: className}
		shardingState := f.createDynamicShardingState()
		return readFunc(class, shardingState)
	}).Maybe()

	schemaReaderMock.EXPECT().ShardReplicas(mock.Anything, mock.Anything).RunAndReturn(func(class string, shard string) ([]string, error) {
		v, ok := f.Shard2replicas[shard]
		if !ok {
			return []string{}, fmt.Errorf("could not find node")
		}
		return v, nil
	}).Maybe()

	// Replication FSM: pass through the configured replicas for known shards,
	// falling back to the caller-provided locations otherwise.
	replicationFsmMock := replicationTypes.NewMockReplicationFSMReader(f.t)
	replicationFsmMock.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
		func(collection string, shard string, shardReplicasLocation []string) []string {
			if replicas, ok := f.Shard2replicas[shard]; ok {
				return replicas
			}
			return shardReplicasLocation
		}).Maybe()

	replicationFsmMock.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).RunAndReturn(
		func(collection string, shard string, shardReplicasLocation []string) ([]string, []string) {
			if replicas, ok := f.Shard2replicas[shard]; ok {
				// No additional (second return value) write targets beyond
				// the configured replicas.
				return replicas, []string{}
			}
			return shardReplicasLocation, []string{}
		}).Maybe()

	return clusterRouter.NewBuilder(f.CLS, f.isMultiTenant, clusterState, schemaGetterMock, schemaReaderMock, replicationFsmMock).Build()
}

// createDynamicShardingState builds a sharding state that marks every shard in
// Shard2replicas as HOT and owned by its configured replica nodes.
// Partitioning is enabled iff the factory is multi-tenant.
func (f *fakeFactory) createDynamicShardingState() *sharding.State {
	shardingState := &sharding.State{
		IndexID:             "idx-123",
		Config:              config.Config{},
		Physical:            map[string]sharding.Physical{},
		Virtual:             nil,
		PartitioningEnabled: f.isMultiTenant,
	}

	for shard, replicaNodes := range f.Shard2replicas {
		physical := sharding.Physical{
			Name:           shard,
			BelongsToNodes: replicaNodes,
			Status:         models.TenantActivityStatusHOT,
		}
		shardingState.Physical[shard] = physical
	}
	return shardingState
}

// newReplicatorWithSourceNode builds a Replicator whose router prefers the
// given node. The no-automated-resolution deletion strategy is used so tests
// see raw conflicts.
// NOTE(review): the literal "A" appears to be the local node name expected by
// replica.NewReplicator — confirm against its signature.
func (f *fakeFactory) newReplicatorWithSourceNode(thisNode string) *replica.Replicator {
	router := f.newRouter(thisNode)
	getDeletionStrategy := func() string {
		return models.ReplicationConfigDeletionStrategyNoAutomatedResolution
	}
	return replica.NewReplicator(
		f.CLS,
		router,
		"A",
		getDeletionStrategy,
		&struct {
			replica.RClient
			replica.WClient
		}{f.RClient, f.WClient},
		f.log,
	)
}

// newReplicator builds a Replicator without a preferred source node
// (empty thisNode keeps the configured node order).
func (f *fakeFactory) newReplicator() *replica.Replicator {
	router := f.newRouter("")
	getDeletionStrategy := func() string {
		return models.ReplicationConfigDeletionStrategyNoAutomatedResolution
	}
	return replica.NewReplicator(
		f.CLS,
		router,
		"A",
		getDeletionStrategy,
		&struct {
			replica.RClient
			replica.WClient
		}{f.RClient, f.WClient},
		f.log,
	)
}

// newFinderWithTimings builds a Finder with explicit timing parameters
// (presumably initial and maximum retry/backoff durations — confirm against
// replica.NewFinder).
func (f *fakeFactory) newFinderWithTimings(thisNode string, tInitial time.Duration, tMax time.Duration) *replica.Finder {
	router := f.newRouter(thisNode)
	getDeletionStrategy := func() string {
		return models.ReplicationConfigDeletionStrategyNoAutomatedResolution
	}
	return replica.NewFinder(f.CLS, router, thisNode, f.RClient, f.log, tInitial, tMax, getDeletionStrategy)
}

// newFinder builds a Finder with very short, test-friendly timings.
func (f *fakeFactory) newFinder(thisNode string) *replica.Finder {
	return f.newFinderWithTimings(thisNode, 1*time.Microsecond, 128*time.Millisecond)
}

func
(f *fakeFactory) assertLogContains(t *testing.T, key string, xs ...string) {
	// Asserts that the last log entry's structured field stored under key
	// contains each of the given substrings. Logging might happen after
	// returning to the caller (on a background goroutine), so the last entry
	// is polled for before failing.
	t.Helper()
	entry := f.waitForLastLogEntry()
	if entry == nil {
		t.Errorf("log entry is empty")
		return
	}
	// A missing or non-string field yields "", which then fails Contains.
	data, _ := entry.Data[key].(string)
	for _, x := range xs {
		assert.Contains(t, data, x)
	}
}

// waitForLastLogEntry returns the most recent log entry captured by the test
// hook. Because the code under test may log from a background goroutine after
// returning to the caller, it polls up to 20 times with a 10ms pause before
// giving up and returning nil.
func (f *fakeFactory) waitForLastLogEntry() *logrus.Entry {
	entry := f.hook.LastEntry()
	for i := 0; entry == nil && i < 20; i++ {
		time.Sleep(10 * time.Millisecond)
		entry = f.hook.LastEntry()
	}
	return entry
}

// assertLogErrorContains asserts that the most recent log entry's message
// contains each of the given substrings, polling briefly because the entry
// may be written by a background goroutine.
func (f *fakeFactory) assertLogErrorContains(t *testing.T, xs ...string) {
	t.Helper()
	entry := f.waitForLastLogEntry()
	if entry == nil {
		t.Errorf("log entry is empty")
		return
	}
	for _, x := range xs {
		assert.Contains(t, entry.Message, x)
	}
}