diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types.go
new file mode 100644
index 0000000000000000000000000000000000000000..85f056014afcab4f2c0649ce93b365497c02a879
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types.go
@@ -0,0 +1,545 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package conv
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+)
+
const (
	// https://casbin.org/docs/rbac/#how-to-distinguish-role-from-user
	// ROLE_NAME_PREFIX to prefix role to help casbin to distinguish on Enforcing
	ROLE_NAME_PREFIX = "role" + PREFIX_SEPARATOR
	// OIDC_GROUP_NAME_PREFIX to prefix role to help casbin to distinguish on Enforcing
	OIDC_GROUP_NAME_PREFIX = "group" + PREFIX_SEPARATOR
	// PREFIX_SEPARATOR separates the entity kind ("role"/"group"/user type)
	// from the entity name in prefixed identifiers.
	PREFIX_SEPARATOR = ":"

	// CRUD allow all actions on a resource
	// this is internal for casbin to handle admin actions
	CRUD = "(C)|(R)|(U)|(D)"
	// CRU allow all actions on a resource except DELETE
	// this is internal for casbin to handle editor actions
	CRU = "(C)|(R)|(U)"
	// VALID_VERBS is the regex alternation of every verb letter accepted in a
	// policy, including (A) for assign/revoke.
	VALID_VERBS = "(C)|(R)|(U)|(D)|(A)"
	// InternalPlaceHolder is a place holder to mark empty roles
	InternalPlaceHolder = "wv_internal_empty"
)
+
var (
	// BuiltInPolicies maps each built-in role name to the verb (or verb group
	// regex) that role is granted.
	BuiltInPolicies = map[string]string{
		authorization.Viewer:   authorization.READ,
		authorization.Admin:    VALID_VERBS,
		authorization.Root:     VALID_VERBS,
		authorization.ReadOnly: authorization.READ,
	}
	// weaviate_actions_prefixes maps a casbin verb (or verb group) back to the
	// word an external action name starts with, e.g. "create" in
	// "create_collections"; used when reconstructing permissions from policies.
	weaviate_actions_prefixes = map[string]string{
		CRUD:                           "manage",
		CRU:                            "manage",
		authorization.ROLE_SCOPE_MATCH: "manage",
		authorization.CREATE:           "create",
		authorization.READ:             "read",
		authorization.UPDATE:           "update",
		authorization.DELETE:           "delete",
		authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE: "assign_and_revoke",
	}
)
+
+var resourcePatterns = []string{
+ fmt.Sprintf(`^%s/.*$`, authorization.GroupsDomain),
+ fmt.Sprintf(`^%s/[^/]+$`, authorization.GroupsDomain),
+ fmt.Sprintf(`^%s/.*$`, authorization.UsersDomain),
+ fmt.Sprintf(`^%s/[^/]+$`, authorization.UsersDomain),
+ fmt.Sprintf(`^%s/.*$`, authorization.RolesDomain),
+ fmt.Sprintf(`^%s/[^/]+$`, authorization.RolesDomain),
+ fmt.Sprintf(`^%s/.*$`, authorization.ClusterDomain),
+ fmt.Sprintf(`^%s/verbosity/minimal$`, authorization.NodesDomain),
+ fmt.Sprintf(`^%s/verbosity/verbose/collections/[^/]+$`, authorization.NodesDomain),
+ fmt.Sprintf(`^%s/verbosity/verbose/collections/[^/]+$`, authorization.NodesDomain),
+ fmt.Sprintf(`^%s/collections/.*$`, authorization.BackupsDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+$`, authorization.BackupsDomain),
+ fmt.Sprintf(`^%s/collections/.*$`, authorization.SchemaDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+$`, authorization.SchemaDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+/shards/.*$`, authorization.SchemaDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+/shards/[^/]+/objects/.*$`, authorization.DataDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+/shards/[^/]+/objects/[^/]+$`, authorization.DataDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+/shards/[^/]+$`, authorization.ReplicateDomain),
+ fmt.Sprintf(`^%s/collections/[^/]+/aliases/[^/]+$`, authorization.AliasesDomain),
+}
+
// newPolicy converts a raw casbin policy row into the internal Policy struct.
// Expected layout: policy[1] = resource path, policy[2] = verb,
// policy[3] = domain (policy[0] is presumably the casbin policy tag, e.g.
// "p" — confirm against the casbin model). Assumes len(policy) >= 4; shorter
// rows would panic, so callers must pass well-formed rows.
func newPolicy(policy []string) *authorization.Policy {
	return &authorization.Policy{
		Resource: fromCasbinResource(policy[1]),
		Verb:     policy[2],
		Domain:   policy[3],
	}
}
+
// fromCasbinResource maps a casbin resource path back to its external
// representation by collapsing every regex wildcard ".*" into a plain "*".
func fromCasbinResource(resource string) string {
	const (
		casbinWildcard   = ".*"
		externalWildcard = "*"
	)
	return strings.ReplaceAll(resource, casbinWildcard, externalWildcard)
}
+
+func CasbinClusters() string {
+ return fmt.Sprintf("%s/.*", authorization.ClusterDomain)
+}
+
+func CasbinNodes(verbosity, class string) string {
+ class = schema.UppercaseClassesNames(class)[0]
+ if verbosity == "minimal" {
+ return fmt.Sprintf("%s/verbosity/minimal", authorization.NodesDomain)
+ }
+ if class == "" {
+ class = "*"
+ }
+ class = strings.ReplaceAll(class, "*", ".*")
+ return fmt.Sprintf("%s/verbosity/verbose/collections/%s", authorization.NodesDomain, class)
+}
+
+func CasbinBackups(class string) string {
+ class = schema.UppercaseClassesNames(class)[0]
+ if class == "" {
+ class = "*"
+ }
+ class = strings.ReplaceAll(class, "*", ".*")
+ return fmt.Sprintf("%s/collections/%s", authorization.BackupsDomain, class)
+}
+
+func CasbinUsers(user string) string {
+ if user == "" {
+ user = "*"
+ }
+ user = strings.ReplaceAll(user, "*", ".*")
+ return fmt.Sprintf("%s/%s", authorization.UsersDomain, user)
+}
+
+func CasbinGroups(group string, groupType string) string {
+ if group == "" {
+ group = "*"
+ }
+ group = strings.ReplaceAll(group, "*", ".*")
+ return fmt.Sprintf("%s/%s/%s", authorization.GroupsDomain, groupType, group)
+}
+
+func CasbinRoles(role string) string {
+ if role == "" {
+ role = "*"
+ }
+ role = strings.ReplaceAll(role, "*", ".*")
+ return fmt.Sprintf("%s/%s", authorization.RolesDomain, role)
+}
+
+func CasbinSchema(collection, shard string) string {
+ collection = schema.UppercaseClassesNames(collection)[0]
+ if collection == "" {
+ collection = "*"
+ }
+ if shard == "" {
+ shard = "*"
+ }
+ collection = strings.ReplaceAll(collection, "*", ".*")
+ shard = strings.ReplaceAll(shard, "*", ".*")
+ return fmt.Sprintf("%s/collections/%s/shards/%s", authorization.SchemaDomain, collection, shard)
+}
+
+func CasbinReplicate(collection, shard string) string {
+ collection = schema.UppercaseClassesNames(collection)[0]
+ if collection == "" {
+ collection = "*"
+ }
+ if shard == "" {
+ shard = "*"
+ }
+ collection = strings.ReplaceAll(collection, "*", ".*")
+ shard = strings.ReplaceAll(shard, "*", ".*")
+ return fmt.Sprintf("%s/collections/%s/shards/%s", authorization.ReplicateDomain, collection, shard)
+}
+
+func CasbinAliases(collection, alias string) string {
+ if collection == "" {
+ collection = "*"
+ }
+ if alias == "" {
+ alias = "*"
+ }
+ collection = strings.ReplaceAll(collection, "*", ".*")
+ alias = strings.ReplaceAll(alias, "*", ".*")
+ return fmt.Sprintf("%s/collections/%s/aliases/%s", authorization.AliasesDomain, collection, alias)
+}
+
+func CasbinData(collection, shard, object string) string {
+ collection = schema.UppercaseClassesNames(collection)[0]
+ if collection == "" {
+ collection = "*"
+ }
+ if shard == "" {
+ shard = "*"
+ }
+ if object == "" {
+ object = "*"
+ }
+ collection = strings.ReplaceAll(collection, "*", ".*")
+ shard = strings.ReplaceAll(shard, "*", ".*")
+ object = strings.ReplaceAll(object, "*", ".*")
+ return fmt.Sprintf("%s/collections/%s/shards/%s/objects/%s", authorization.DataDomain, collection, shard, object)
+}
+
+func extractFromExtAction(inputAction string) (string, string, error) {
+ splits := strings.Split(inputAction, "_")
+ if len(splits) < 2 {
+ return "", "", fmt.Errorf("invalid action: %s", inputAction)
+ }
+ domain := splits[len(splits)-1]
+ verb := strings.ToUpper(splits[0][:1])
+ if verb == "M" {
+ verb = CRUD
+ }
+
+ if !validVerb(verb) {
+ return "", "", fmt.Errorf("invalid verb: %s", verb)
+ }
+
+ return verb, domain, nil
+}
+
+// casbinPolicyDomains decouples the endpoints domains
+// from the casbin internal domains.
+// e.g.
+// [create_collections, create_tenants] -> schema domain
+func casbinPolicyDomains(domain string) string {
+ switch domain {
+ case authorization.CollectionsDomain, authorization.TenantsDomain:
+ return authorization.SchemaDomain
+ default:
+ return domain
+ }
+}
+
// policy translates an external (REST model) permission into a casbin policy:
// the verb and domain come from the action name, and the resource path is
// built from the permission's domain-specific filter fields (nil fields widen
// to "*"). A permission without an action maps to the internal empty-role
// placeholder. Returns an error for invalid actions, domains, or resources.
func policy(permission *models.Permission) (*authorization.Policy, error) {
	if permission.Action == nil {
		// marker row for a role that has no permissions yet
		return &authorization.Policy{Resource: InternalPlaceHolder}, nil
	}

	verb, domain, err := extractFromExtAction(*permission.Action)
	if err != nil {
		return nil, err
	}

	var resource string
	switch domain {
	case authorization.GroupsDomain:
		// only OIDC groups are accepted; a nil Groups block is invalid here
		group := "*"
		if permission.Groups != nil {
			if permission.Groups.Group != nil {
				group = *permission.Groups.Group
			}
			if permission.Groups.GroupType != models.GroupTypeOidc {
				return nil, fmt.Errorf("invalid groups type: %v", permission.Groups.GroupType)
			}
		} else {
			return nil, fmt.Errorf("invalid permission: %v", permission)
		}
		resource = CasbinGroups(group, string(models.GroupTypeOidc))
	case authorization.UsersDomain:
		user := "*"
		if permission.Users != nil && permission.Users.Users != nil {
			user = *permission.Users.Users
		}
		resource = CasbinUsers(user)
	case authorization.RolesDomain:
		role := "*"
		// default verb for role to handle cases where role is nil
		origVerb := verb
		verb = authorization.VerbWithScope(verb, authorization.ROLE_SCOPE_MATCH)
		if permission.Roles != nil && permission.Roles.Role != nil {
			role = *permission.Roles.Role
			if permission.Roles.Scope != nil {
				// an explicit scope overrides the MATCH default
				verb = authorization.VerbWithScope(origVerb, strings.ToUpper(*permission.Roles.Scope))
			}
		}
		resource = CasbinRoles(role)
	case authorization.ClusterDomain:
		resource = CasbinClusters()
	case authorization.CollectionsDomain:
		// "#" in the shard position marks a collection-level (non-tenant)
		// permission; see weaviatePermissionAction for the reverse mapping
		collection := "*"
		tenant := "#"
		if permission.Collections != nil && permission.Collections.Collection != nil {
			collection = schema.UppercaseClassName(*permission.Collections.Collection)
		}
		resource = CasbinSchema(collection, tenant)

	case authorization.TenantsDomain:
		collection := "*"
		tenant := "*"
		if permission.Tenants != nil {
			if permission.Tenants.Collection != nil {
				collection = schema.UppercaseClassName(*permission.Tenants.Collection)
			}

			if permission.Tenants.Tenant != nil {
				tenant = *permission.Tenants.Tenant
			}
		}
		resource = CasbinSchema(collection, tenant)
	case authorization.DataDomain:
		collection := "*"
		tenant := "*"
		object := "*"
		if permission.Data != nil && permission.Data.Collection != nil {
			collection = schema.UppercaseClassName(*permission.Data.Collection)
		}
		if permission.Data != nil && permission.Data.Tenant != nil {
			tenant = *permission.Data.Tenant
		}
		if permission.Data != nil && permission.Data.Object != nil {
			object = *permission.Data.Object
		}
		resource = CasbinData(collection, tenant, object)
	case authorization.BackupsDomain:
		collection := "*"
		if permission.Backups != nil {
			if permission.Backups.Collection != nil {
				collection = schema.UppercaseClassName(*permission.Backups.Collection)
			}
		}
		resource = CasbinBackups(collection)
	case authorization.NodesDomain:
		collection := "*"
		verbosity := "minimal"
		if permission.Nodes != nil {
			if permission.Nodes.Collection != nil {
				collection = schema.UppercaseClassName(*permission.Nodes.Collection)
			}
			if permission.Nodes.Verbosity != nil {
				verbosity = *permission.Nodes.Verbosity
			}
		}
		resource = CasbinNodes(verbosity, collection)
	case authorization.ReplicateDomain:
		collection := "*"
		shard := "*"
		if permission.Replicate != nil {
			if permission.Replicate.Collection != nil {
				collection = schema.UppercaseClassName(*permission.Replicate.Collection)
			}
			if permission.Replicate.Shard != nil {
				shard = *permission.Replicate.Shard
			}
		}
		resource = CasbinReplicate(collection, shard)
	case authorization.AliasesDomain:
		collection := "*"
		alias := "*"
		if permission.Aliases != nil {
			if permission.Aliases.Collection != nil {
				collection = schema.UppercaseClassName(*permission.Aliases.Collection)
			}
			if permission.Aliases.Alias != nil {
				// NOTE(review): the alias is upper-cased with the class-name
				// helper — presumably aliases share class naming rules; confirm
				alias = schema.UppercaseClassName(*permission.Aliases.Alias)
			}
		}
		resource = CasbinAliases(collection, alias)
	default:
		return nil, fmt.Errorf("invalid domain: %s", domain)

	}
	if !validResource(resource) {
		return nil, fmt.Errorf("invalid resource: %s", resource)
	}

	return &authorization.Policy{
		Resource: resource,
		Verb:     verb,
		// collections/tenants collapse into the schema domain for casbin
		Domain: casbinPolicyDomains(domain),
	}, nil
}
+
+func weaviatePermissionAction(pathLastPart, verb, domain string) string {
+ action := fmt.Sprintf("%s_%s", weaviate_actions_prefixes[verb], domain)
+ action = strings.ReplaceAll(action, "_*", "")
+ switch domain {
+ case authorization.SchemaDomain:
+ if pathLastPart == "#" {
+ // e.g
+ // schema/collections/ABC/shards/# collection permission
+ // schema/collections/ABC/shards/* tenant permission
+ action = fmt.Sprintf("%s_%s", weaviate_actions_prefixes[verb], authorization.CollectionsDomain)
+ } else {
+ action = fmt.Sprintf("%s_%s", weaviate_actions_prefixes[verb], authorization.TenantsDomain)
+ }
+ return action
+ default:
+ return action
+ }
+}
+
// permission is the inverse of policy: it reconstructs an external (REST
// model) permission from a stored casbin policy row. Resource path segments
// are positional (split on "/"), so the row must describe a well-formed
// resource; when validatePath is true the path is checked against the known
// resource patterns first (regexp matching, hence optional on hot paths).
func permission(policy []string, validatePath bool) (*models.Permission, error) {
	mapped := newPolicy(policy)

	if mapped.Resource == InternalPlaceHolder {
		// marker row for a role without permissions
		return &models.Permission{}, nil
	}

	if !validVerb(mapped.Verb) {
		return nil, fmt.Errorf("invalid verb: %s", mapped.Verb)
	}

	permission := &models.Permission{}

	splits := strings.Split(mapped.Resource, "/")

	// validating the resource can be expensive (regexp!)
	if validatePath && !validResource(mapped.Resource) {
		return nil, fmt.Errorf("invalid resource: %s", mapped.Resource)
	}

	switch mapped.Domain {
	case authorization.SchemaDomain:
		// schema/collections/<c>/shards/<s>: "#" in the shard slot means a
		// collection-level permission, anything else a tenant-level one
		if splits[4] == "#" {
			permission.Collections = &models.PermissionCollections{
				Collection: &splits[2],
			}
		} else {
			permission.Tenants = &models.PermissionTenants{
				Collection: &splits[2],
				Tenant:     &splits[4],
			}
		}
	case authorization.DataDomain:
		// data/collections/<c>/shards/<t>/objects/<o>
		permission.Data = &models.PermissionData{
			Collection: &splits[2],
			Tenant:     &splits[4],
			Object:     &splits[6],
		}
	case authorization.RolesDomain:
		permission.Roles = &models.PermissionRoles{
			Role: &splits[1],
		}

		// role verbs carry a "_SCOPE" suffix (see VerbWithScope); split it
		// back into the bare verb and a lowercase scope
		verbSplits := strings.Split(mapped.Verb, "_")
		mapped.Verb = verbSplits[0]
		scope := strings.ToLower(verbSplits[1])
		permission.Roles.Scope = &scope

	case authorization.NodesDomain:
		// nodes/verbosity/<v>[/collections/<c>]; minimal verbosity has no
		// collection segment
		verbosity := splits[2]
		var collection *string
		if verbosity == "minimal" {
			collection = nil
		} else {
			collection = &splits[4]
		}
		permission.Nodes = &models.PermissionNodes{
			Collection: collection,
			Verbosity:  &verbosity,
		}
	case authorization.BackupsDomain:
		permission.Backups = &models.PermissionBackups{
			Collection: &splits[2],
		}
	case authorization.UsersDomain:
		permission.Users = &models.PermissionUsers{
			Users: &splits[1],
		}
	case authorization.ReplicateDomain:
		permission.Replicate = &models.PermissionReplicate{
			Collection: &splits[2],
			Shard:      &splits[4],
		}
	case authorization.AliasesDomain:
		permission.Aliases = &models.PermissionAliases{
			Collection: &splits[2],
			Alias:      &splits[4],
		}
	case authorization.GroupsDomain:
		// groups/<type>/<group>
		permission.Groups = &models.PermissionGroups{
			Group:     &splits[2],
			GroupType: models.GroupType(splits[1]),
		}
	case *authorization.All:
		// wildcard domain: grant the "all" filter for every known domain
		permission.Backups = authorization.AllBackups
		permission.Data = authorization.AllData
		permission.Nodes = authorization.AllNodes
		permission.Roles = authorization.AllRoles
		permission.Collections = authorization.AllCollections
		permission.Tenants = authorization.AllTenants
		permission.Users = authorization.AllUsers
		permission.Replicate = authorization.AllReplicate
		permission.Aliases = authorization.AllAliases
		permission.Groups = authorization.AllOIDCGroups
	case authorization.ClusterDomain:
		// do nothing
	default:
		return nil, fmt.Errorf("invalid domain: %s", mapped.Domain)
	}

	permission.Action = authorization.String(weaviatePermissionAction(splits[len(splits)-1], mapped.Verb, mapped.Domain))
	return permission, nil
}
+
+func validResource(input string) bool {
+ for _, pattern := range resourcePatterns {
+ matched, err := regexp.MatchString(pattern, input)
+ if err != nil {
+ return false
+ }
+ if matched {
+ return true
+ }
+ }
+ return false
+}
+
+func validVerb(input string) bool {
+ return regexp.MustCompile(VALID_VERBS).MatchString(input)
+}
+
+func PrefixRoleName(name string) string {
+ if strings.HasPrefix(name, ROLE_NAME_PREFIX) {
+ return name
+ }
+ return fmt.Sprintf("%s%s", ROLE_NAME_PREFIX, name)
+}
+
+func PrefixGroupName(name string) string {
+ if strings.HasPrefix(name, OIDC_GROUP_NAME_PREFIX) {
+ return name
+ }
+ return fmt.Sprintf("%s%s", OIDC_GROUP_NAME_PREFIX, name)
+}
+
// NameHasPrefix reports whether name already carries an entity prefix.
// NOTE(review): despite the name, this checks for the separator anywhere in
// the string (strings.Contains), not only at the start — a name that merely
// contains ":" also passes; confirm that is intended.
func NameHasPrefix(name string) bool {
	return strings.Contains(name, PREFIX_SEPARATOR)
}
+
// UserNameWithTypeFromPrincipal renders a principal as "<userType>:<username>",
// the prefixed identifier format used throughout this package (see
// GetUserAndPrefix for the reverse). Assumes principal is non-nil.
func UserNameWithTypeFromPrincipal(principal *models.Principal) string {
	return fmt.Sprintf("%s:%s", principal.UserType, principal.Username)
}
+
// UserNameWithTypeFromId renders a user id as "<authType>:<username>", the
// same prefixed format produced by UserNameWithTypeFromPrincipal.
func UserNameWithTypeFromId(username string, authType authentication.AuthType) string {
	return fmt.Sprintf("%s:%s", authType, username)
}
+
+func TrimRoleNamePrefix(name string) string {
+ return strings.TrimPrefix(name, ROLE_NAME_PREFIX)
+}
+
+func GetUserAndPrefix(name string) (string, string) {
+ splits := strings.Split(name, PREFIX_SEPARATOR)
+ return splits[1], splits[0]
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..edcd5eb09ae4706a7e3fcddf0a99c5f77e57053c
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/conv/casbin_types_test.go
@@ -0,0 +1,1160 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package conv
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+)
+
// innerTest is one action variant shared by the policy/permission test
// tables: the external action under test, a human-readable label used in the
// subtest name, and the casbin verb the action is expected to map to.
type innerTest struct {
	permissionAction string
	testDescription  string
	policyVerb       string
}
+
+var (
+ foo = authorization.String("Foo")
+ bar = authorization.String("bar")
+ baz = authorization.String("baz")
+
+ createDesc = "create"
+ readDesc = "read"
+ updateDesc = "update"
+ deleteDesc = "delete"
+ manageDesc = "manage"
+
+ createVerb = authorization.CREATE
+ readVerb = authorization.READ
+ updateVerb = authorization.UPDATE
+ deleteVerb = authorization.DELETE
+ assignAndRevokeVerb = authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE
+ manageVerb = CRUD
+
+ rolesTestsR = []innerTest{
+ {permissionAction: authorization.ReadRoles, testDescription: readDesc, policyVerb: authorization.VerbWithScope(readVerb, authorization.ROLE_SCOPE_MATCH)},
+ }
+ rolesTestsCUD = []innerTest{
+ {permissionAction: authorization.CreateRoles, testDescription: createVerb, policyVerb: authorization.VerbWithScope(createVerb, authorization.ROLE_SCOPE_ALL)},
+ {permissionAction: authorization.UpdateRoles, testDescription: updateDesc, policyVerb: authorization.VerbWithScope(updateVerb, authorization.ROLE_SCOPE_ALL)},
+ {permissionAction: authorization.DeleteRoles, testDescription: deleteDesc, policyVerb: authorization.VerbWithScope(deleteVerb, authorization.ROLE_SCOPE_ALL)},
+ }
+ clusterTests = []innerTest{
+ {permissionAction: authorization.ReadCluster, testDescription: readDesc, policyVerb: readVerb},
+ }
+ nodesTests = []innerTest{
+ {permissionAction: authorization.ReadNodes, testDescription: readDesc, policyVerb: readVerb},
+ }
+ backupsTests = []innerTest{
+ {permissionAction: authorization.ManageBackups, testDescription: manageDesc, policyVerb: manageVerb},
+ }
+ collectionsTests = []innerTest{
+ {permissionAction: authorization.CreateCollections, testDescription: createDesc, policyVerb: createVerb},
+ {permissionAction: authorization.ReadCollections, testDescription: readDesc, policyVerb: readVerb},
+ {permissionAction: authorization.UpdateCollections, testDescription: updateDesc, policyVerb: updateVerb},
+ {permissionAction: authorization.DeleteCollections, testDescription: deleteDesc, policyVerb: deleteVerb},
+ }
+ objectsDataTests = []innerTest{
+ {permissionAction: authorization.CreateData, testDescription: createDesc, policyVerb: createVerb},
+ {permissionAction: authorization.ReadData, testDescription: readDesc, policyVerb: readVerb},
+ {permissionAction: authorization.UpdateData, testDescription: updateDesc, policyVerb: updateVerb},
+ {permissionAction: authorization.DeleteData, testDescription: deleteDesc, policyVerb: deleteVerb},
+ }
+ tenantsActionTests = []innerTest{
+ {permissionAction: authorization.CreateTenants, testDescription: createDesc, policyVerb: createVerb},
+ {permissionAction: authorization.ReadTenants, testDescription: readDesc, policyVerb: readVerb},
+ {permissionAction: authorization.UpdateTenants, testDescription: updateDesc, policyVerb: updateVerb},
+ {permissionAction: authorization.DeleteTenants, testDescription: deleteDesc, policyVerb: deleteVerb},
+ }
+ userTests = []innerTest{
+ {permissionAction: authorization.AssignAndRevokeUsers, testDescription: manageDesc, policyVerb: assignAndRevokeVerb},
+ {permissionAction: authorization.CreateUsers, testDescription: createDesc, policyVerb: createVerb},
+ {permissionAction: authorization.ReadUsers, testDescription: readDesc, policyVerb: readVerb},
+ {permissionAction: authorization.UpdateUsers, testDescription: updateDesc, policyVerb: updateVerb},
+ {permissionAction: authorization.DeleteUsers, testDescription: deleteDesc, policyVerb: deleteVerb},
+ }
+ groupTests = []innerTest{
+ {permissionAction: authorization.ReadGroups, testDescription: readDesc, policyVerb: readVerb},
+ {permissionAction: authorization.AssignAndRevokeGroups, testDescription: manageDesc, policyVerb: assignAndRevokeVerb},
+ }
+)
+
// Test_policy drives the permission -> casbin policy conversion: each table
// entry fixes the domain-specific filter fields and expected resource/domain,
// and every innerTest variant supplies the action plus the verb it must map
// to. NOTE(review): several entries are duplicated verbatim (e.g. "all
// tenants in all collections", "a tenant in a collection") — harmless but
// presumably unintended; confirm.
func Test_policy(t *testing.T) {
	tests := []struct {
		name       string
		permission *models.Permission
		policy     *authorization.Policy
		tests      []innerTest
	}{
		{
			name: "all roles",
			permission: &models.Permission{
				Roles: &models.PermissionRoles{Role: authorization.All, Scope: authorization.String(models.PermissionRolesScopeMatch)},
			},
			policy: &authorization.Policy{
				Resource: CasbinRoles("*"),
				Domain:   authorization.RolesDomain,
				Verb:     authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH),
			},
			tests: rolesTestsR,
		},
		{
			name: "a role",
			permission: &models.Permission{
				Roles: &models.PermissionRoles{Role: authorization.String("admin"), Scope: authorization.String(models.PermissionRolesScopeMatch)},
			},
			policy: &authorization.Policy{
				Resource: CasbinRoles("admin"),
				Domain:   authorization.RolesDomain,
				Verb:     authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_MATCH),
			},
			tests: rolesTestsR,
		},
		{
			name: "a role with scope all",
			permission: &models.Permission{
				Roles: &models.PermissionRoles{Role: authorization.String("admin"), Scope: authorization.String(models.PermissionRolesScopeAll)},
			},
			policy: &authorization.Policy{
				Resource: CasbinRoles("admin"),
				Domain:   authorization.RolesDomain,
				Verb:     authorization.VerbWithScope(authorization.READ, authorization.ROLE_SCOPE_ALL),
			},
			tests: rolesTestsCUD,
		},
		{
			name:       "cluster",
			permission: &models.Permission{},
			policy: &authorization.Policy{
				Resource: CasbinClusters(),
				Domain:   authorization.ClusterDomain,
			},
			tests: clusterTests,
		},
		{
			name: "minimal nodes",
			permission: &models.Permission{
				Nodes: &models.PermissionNodes{
					Verbosity: authorization.String("minimal"),
				},
			},
			policy: &authorization.Policy{
				// the class argument is ignored for minimal verbosity
				Resource: CasbinNodes("minimal", "doesntmatter"),
				Domain:   authorization.NodesDomain,
			},
			tests: nodesTests,
		},
		{
			name: "verbose nodes for all collections",
			permission: &models.Permission{
				Nodes: &models.PermissionNodes{
					Verbosity:  authorization.String("verbose"),
					Collection: authorization.All,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinNodes("verbose", "*"),
				Domain:   authorization.NodesDomain,
			},
			tests: nodesTests,
		},
		{
			name: "verbose nodes for one collections",
			permission: &models.Permission{
				Nodes: &models.PermissionNodes{
					Verbosity:  authorization.String("verbose"),
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinNodes("verbose", "Foo"),
				Domain:   authorization.NodesDomain,
			},
			tests: nodesTests,
		},
		{
			name: "all backends",
			permission: &models.Permission{
				Backups: &models.PermissionBackups{},
			},
			policy: &authorization.Policy{
				Resource: CasbinBackups("*"),
				Domain:   authorization.BackupsDomain,
			},
			tests: backupsTests,
		},
		{
			name: "a backend",
			permission: &models.Permission{
				Backups: &models.PermissionBackups{
					Collection: authorization.String("ABC"),
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinBackups("ABC"),
				Domain:   authorization.BackupsDomain,
			},
			tests: backupsTests,
		},
		{
			name: "all collections",
			permission: &models.Permission{
				Collections: &models.PermissionCollections{},
			},
			policy: &authorization.Policy{
				// "#" in the shard slot marks a collection-level permission
				Resource: CasbinSchema("*", "#"),
				Domain:   authorization.SchemaDomain,
			},
			tests: collectionsTests,
		},
		{
			name: "a collection",
			permission: &models.Permission{
				Collections: &models.PermissionCollections{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("Foo", "#"),
				Domain:   authorization.SchemaDomain,
			},
			tests: collectionsTests,
		},
		{
			name: "all tenants in all collections",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("*", "*"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "all tenants in a collection",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("Foo", "*"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "a tenant in all collections",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Tenant: bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("*", "bar"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "a tenant in a collection",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Collection: foo,
					Tenant:     bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("Foo", "bar"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "all objects in all collections ST",
			permission: &models.Permission{
				Data: &models.PermissionData{},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("*", "*", "*"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "all objects in a collection ST",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "*", "*"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in all collections ST",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Object: baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("*", "*", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in a collection ST",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
					Object:     baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "*", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "all objects in all tenants in a collection MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "*", "*"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "all objects in a tenant in all collections MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Tenant: bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("*", "bar", "*"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "all objects in a tenant in a collection MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
					Tenant:     bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "bar", "*"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in all tenants in all collections MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Object: baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("*", "*", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in all tenants in a collection MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
					Object:     baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "*", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in a tenant in all collections MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Tenant: bar,
					Object: baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("*", "bar", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "an object in a tenant in a collection MT",
			permission: &models.Permission{
				Data: &models.PermissionData{
					Collection: foo,
					Tenant:     bar,
					Object:     baz,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinData("Foo", "bar", "baz"),
				Domain:   authorization.DataDomain,
			},
			tests: objectsDataTests,
		},
		{
			name: "a tenant",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				// empty shard argument widens to "*" inside CasbinSchema
				Resource: CasbinSchema("Foo", ""),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "all tenants in all collections",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("*", "*"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "all tenants in a collection",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Collection: foo,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("Foo", "*"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "a tenant in all collections",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Tenant: bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("*", "bar"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
		{
			name: "a tenant in a collection",
			permission: &models.Permission{
				Tenants: &models.PermissionTenants{
					Collection: foo,
					Tenant:     bar,
				},
			},
			policy: &authorization.Policy{
				Resource: CasbinSchema("Foo", "bar"),
				Domain:   authorization.SchemaDomain,
			},
			tests: tenantsActionTests,
		},
	}
	for _, tt := range tests {
		for _, ttt := range tt.tests {
			t.Run(fmt.Sprintf("%s %s", ttt.testDescription, tt.name), func(t *testing.T) {
				// the shared fixtures are mutated per variant: only the
				// action and expected verb change between inner runs
				tt.permission.Action = authorization.String(ttt.permissionAction)
				tt.policy.Verb = ttt.policyVerb

				policy, err := policy(tt.permission)
				require.Nil(t, err)
				require.Equal(t, tt.policy, policy)
			})
		}
	}
}
+
+// Test_permission verifies that a casbin policy row is converted back into
+// the equivalent *models.Permission for every supported domain. The outer
+// loop prefixes the resource path with its domain (tt.policy[3]) once per
+// table entry; the inner loop then substitutes the per-action verb before
+// calling permission().
+func Test_permission(t *testing.T) {
+	tests := []struct {
+		name       string
+		policy     []string
+		permission *models.Permission
+		tests      []innerTest
+	}{
+		{
+			name:   "all roles",
+			policy: []string{"p", "/*", "", authorization.RolesDomain},
+			permission: &models.Permission{
+				Roles: &models.PermissionRoles{Role: authorization.String("*"), Scope: authorization.String(models.PermissionRolesScopeMatch)},
+			},
+			tests: rolesTestsR,
+		},
+		{
+			name:   "all roles",
+			policy: []string{"p", "/*", authorization.ROLE_SCOPE_MATCH, authorization.RolesDomain},
+			permission: &models.Permission{
+				Roles: authorization.AllRoles,
+			},
+			tests: rolesTestsCUD,
+		},
+		{
+			name:   "a role",
+			policy: []string{"p", "/custom", authorization.ROLE_SCOPE_MATCH, authorization.RolesDomain},
+			permission: &models.Permission{
+				Roles: &models.PermissionRoles{Role: authorization.String("custom"), Scope: authorization.String(models.PermissionRolesScopeMatch)},
+			},
+			tests: rolesTestsR,
+		},
+		{
+			name:   "a role",
+			policy: []string{"p", "/custom", authorization.ROLE_SCOPE_MATCH, authorization.RolesDomain},
+			permission: &models.Permission{
+				Roles: &models.PermissionRoles{Role: authorization.String("custom"), Scope: authorization.String(models.PermissionRolesScopeAll)},
+			},
+			tests: rolesTestsCUD,
+		},
+		{
+			name:   "all users",
+			policy: []string{"p", "/*", "", authorization.UsersDomain},
+			permission: &models.Permission{
+				Users: authorization.AllUsers,
+			},
+			tests: userTests,
+		},
+		{
+			name:       "cluster",
+			policy:     []string{"p", "/*", "", authorization.ClusterDomain},
+			permission: &models.Permission{},
+			tests:      clusterTests,
+		},
+		{
+			name:   "minimal nodes",
+			policy: []string{"p", "/verbosity/minimal", "", authorization.NodesDomain},
+			permission: &models.Permission{
+				Nodes: &models.PermissionNodes{
+					Verbosity: authorization.String("minimal"),
+				},
+			},
+			tests: nodesTests,
+		},
+		{
+			name:   "verbose nodes over all collections",
+			policy: []string{"p", "/verbosity/verbose/collections/*", "", authorization.NodesDomain},
+			permission: &models.Permission{
+				Nodes: &models.PermissionNodes{
+					Collection: authorization.All,
+					Verbosity:  authorization.String("verbose"),
+				},
+			},
+			tests: nodesTests,
+		},
+		{
+			name:   "verbose nodes over one collection",
+			policy: []string{"p", "/verbosity/verbose/collections/Foo", "", authorization.NodesDomain},
+			permission: &models.Permission{
+				Nodes: &models.PermissionNodes{
+					Collection: authorization.String("Foo"),
+					Verbosity:  authorization.String("verbose"),
+				},
+			},
+			tests: nodesTests,
+		},
+		{
+			name:   "all tenants",
+			policy: []string{"p", "/collections/*/shards/*", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: authorization.AllTenants,
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "a tenant",
+			policy: []string{"p", "/collections/Foo/shards/*", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: &models.PermissionTenants{
+					Collection: foo,
+					Tenant:     authorization.All,
+				},
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "backup all collections",
+			policy: []string{"p", "/collections/*", "", "backups"},
+			permission: &models.Permission{
+				Backups: authorization.AllBackups,
+			},
+			tests: backupsTests,
+		},
+		{
+			name:   "a collection ABC",
+			policy: []string{"p", "/collections/ABC", "", "backups"},
+			permission: &models.Permission{
+				Backups: &models.PermissionBackups{
+					Collection: authorization.String("ABC"),
+				},
+			},
+			tests: backupsTests,
+		},
+		{
+			name:   "all collections",
+			policy: []string{"p", "/collections/*/shards/#", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Collections: authorization.AllCollections,
+			},
+			tests: collectionsTests,
+		},
+		{
+			name:   "a collection",
+			policy: []string{"p", "/collections/Foo/shards/#", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Collections: &models.PermissionCollections{
+					Collection: foo,
+				},
+			},
+			tests: collectionsTests,
+		},
+		{
+			name:   "all tenants in all collections",
+			policy: []string{"p", "/collections/*/shards/*", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: authorization.AllTenants,
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "all tenants in a collection",
+			policy: []string{"p", "/collections/Foo/shards/*", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: &models.PermissionTenants{
+					Collection: foo,
+					Tenant:     authorization.All,
+				},
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "a tenant in all collections",
+			policy: []string{"p", "/collections/*/shards/bar", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: &models.PermissionTenants{
+					Collection: authorization.All,
+					Tenant:     bar,
+				},
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "a tenant in a collection",
+			policy: []string{"p", "/collections/Foo/shards/bar", "", authorization.SchemaDomain},
+			permission: &models.Permission{
+				Tenants: &models.PermissionTenants{
+					Collection: foo,
+					Tenant:     bar,
+				},
+			},
+			tests: tenantsActionTests,
+		},
+		{
+			name:   "all objects in all collections ST",
+			policy: []string{"p", "/collections/*/shards/*/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: authorization.All,
+					Tenant:     authorization.All,
+					Object:     authorization.All,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "all objects in a collection ST",
+			policy: []string{"p", "/collections/Foo/shards/*/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     authorization.All,
+					Object:     authorization.All,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in all collections ST",
+			policy: []string{"p", "/collections/*/shards/*/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: authorization.All,
+					Tenant:     authorization.All,
+					Object:     baz,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in a collection ST",
+			policy: []string{"p", "/collections/Foo/shards/*/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     authorization.All,
+					Object:     baz,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "all objects in all tenants in all collections MT",
+			policy: []string{"p", "/collections/*/shards/*/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: authorization.AllData,
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "all objects in all tenants in a collection MT",
+			policy: []string{"p", "/collections/Foo/shards/*/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     authorization.All,
+					Object:     authorization.All,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "all objects in a tenant in all collections MT",
+			policy: []string{"p", "/collections/*/shards/bar/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: authorization.All,
+					Tenant:     bar,
+					Object:     authorization.All,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "all objects in a tenant in a collection MT",
+			policy: []string{"p", "/collections/Foo/shards/bar/objects/*", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     bar,
+					Object:     authorization.All,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in all tenants in all collections MT",
+			policy: []string{"p", "/collections/*/shards/*/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: authorization.All,
+					Tenant:     authorization.All,
+					Object:     baz,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in all tenants in a collection MT",
+			policy: []string{"p", "/collections/Foo/shards/*/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     authorization.All,
+					Object:     baz,
+				},
+			},
+			// BUGFIX: this entry was missing its tests field, so the inner
+			// loop below never ran and the case was silently skipped.
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in a tenant in all collections MT",
+			policy: []string{"p", "/collections/*/shards/bar/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: authorization.All,
+					Tenant:     bar,
+					Object:     baz,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "an object in a tenant in a collection MT",
+			policy: []string{"p", "/collections/Foo/shards/bar/objects/baz", "", authorization.DataDomain},
+			permission: &models.Permission{
+				Data: &models.PermissionData{
+					Collection: foo,
+					Tenant:     bar,
+					Object:     baz,
+				},
+			},
+			tests: objectsDataTests,
+		},
+		{
+			name:   "a user",
+			policy: []string{"p", "/baz", "", authorization.UsersDomain},
+			permission: &models.Permission{
+				Users: &models.PermissionUsers{
+					Users: baz,
+				},
+			},
+			tests: userTests,
+		},
+		{
+			name:   "all users",
+			policy: []string{"p", "/*", "", authorization.UsersDomain},
+			permission: &models.Permission{
+				Users: &models.PermissionUsers{
+					Users: authorization.All,
+				},
+			},
+			tests: userTests,
+		},
+		{
+			name:   "a group",
+			policy: []string{"p", "/oidc/baz", "", authorization.GroupsDomain},
+			permission: &models.Permission{
+				Groups: &models.PermissionGroups{
+					Group:     baz,
+					GroupType: models.GroupTypeOidc,
+				},
+			},
+			tests: groupTests,
+		},
+		{
+			name:   "all groups",
+			policy: []string{"p", "/oidc/*", "", authorization.GroupsDomain},
+			permission: &models.Permission{
+				Groups: &models.PermissionGroups{
+					Group:     authorization.All,
+					GroupType: models.GroupTypeOidc,
+				},
+			},
+			tests: groupTests,
+		},
+	}
+	for _, tt := range tests {
+		// Prepend the domain (tt.policy[3]) to the resource path once per
+		// table entry, before the inner per-action loop.
+		tt.policy[1] = fmt.Sprintf("%s%s", tt.policy[3], tt.policy[1])
+		for _, ttt := range tt.tests {
+			t.Run(fmt.Sprintf("%s %s", ttt.testDescription, tt.name), func(t *testing.T) {
+				policyForTest := make([]string, len(tt.policy))
+				copy(policyForTest, tt.policy)
+				tt.permission.Action = authorization.String(ttt.permissionAction)
+				// TODO-RBAC : this test has to be rewritten and consider scopes
+				if policyForTest[2] == authorization.ROLE_SCOPE_MATCH {
+					policyForTest[2] = ttt.policyVerb + "_" + authorization.ROLE_SCOPE_MATCH
+				} else {
+					policyForTest[2] = ttt.policyVerb
+				}
+				permission, err := permission(policyForTest, true)
+				require.Nil(t, err)
+				require.Equal(t, tt.permission, permission)
+			})
+		}
+	}
+}
+
+// Test_pUsers checks the casbin resource path produced for the users domain:
+// empty and "*" both map to the wildcard ".*", anything else is kept verbatim.
+func Test_pUsers(t *testing.T) {
+	for _, tc := range []struct {
+		user     string
+		expected string
+	}{
+		{user: "", expected: fmt.Sprintf("%s/.*", authorization.UsersDomain)},
+		{user: "*", expected: fmt.Sprintf("%s/.*", authorization.UsersDomain)},
+		{user: "foo", expected: fmt.Sprintf("%s/foo", authorization.UsersDomain)},
+	} {
+		t.Run(fmt.Sprintf("user: %s", tc.user), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinUsers(tc.user))
+		})
+	}
+}
+
+// Test_pGroups checks the casbin resource path produced for the groups
+// domain with the OIDC group type segment included.
+func Test_pGroups(t *testing.T) {
+	for _, tc := range []struct {
+		group    string
+		expected string
+	}{
+		{group: "", expected: fmt.Sprintf("%s/%s/.*", authorization.GroupsDomain, authentication.AuthTypeOIDC)},
+		{group: "*", expected: fmt.Sprintf("%s/%s/.*", authorization.GroupsDomain, authentication.AuthTypeOIDC)},
+		{group: "foo", expected: fmt.Sprintf("%s/%s/foo", authorization.GroupsDomain, authentication.AuthTypeOIDC)},
+	} {
+		t.Run(fmt.Sprintf("group: %s", tc.group), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinGroups(tc.group, string(authentication.AuthTypeOIDC)))
+		})
+	}
+}
+
+// Test_pRoles checks the casbin resource path produced for the roles domain.
+func Test_pRoles(t *testing.T) {
+	for _, tc := range []struct {
+		role     string
+		expected string
+	}{
+		{role: "", expected: fmt.Sprintf("%s/.*", authorization.RolesDomain)},
+		{role: "*", expected: fmt.Sprintf("%s/.*", authorization.RolesDomain)},
+		{role: "foo", expected: fmt.Sprintf("%s/foo", authorization.RolesDomain)},
+	} {
+		t.Run(fmt.Sprintf("role: %s", tc.role), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinRoles(tc.role))
+		})
+	}
+}
+
+// Test_pCollections checks CasbinSchema with an empty shard argument: the
+// collection name is capitalized ("foo" -> "Foo") and the shard segment
+// falls back to the ".*" wildcard.
+func Test_pCollections(t *testing.T) {
+	for _, tc := range []struct {
+		collection string
+		expected   string
+	}{
+		{collection: "", expected: fmt.Sprintf("%s/collections/.*/shards/.*", authorization.SchemaDomain)},
+		{collection: "*", expected: fmt.Sprintf("%s/collections/.*/shards/.*", authorization.SchemaDomain)},
+		{collection: "foo", expected: fmt.Sprintf("%s/collections/Foo/shards/.*", authorization.SchemaDomain)},
+	} {
+		t.Run(fmt.Sprintf("collection: %s", tc.collection), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinSchema(tc.collection, ""))
+		})
+	}
+}
+
+// Test_CasbinShards covers every empty/"*"/concrete combination of
+// collection and shard for CasbinSchema.
+func Test_CasbinShards(t *testing.T) {
+	for _, tc := range []struct {
+		collection string
+		shard      string
+		expected   string
+	}{
+		{collection: "", shard: "", expected: fmt.Sprintf("%s/collections/.*/shards/.*", authorization.SchemaDomain)},
+		{collection: "*", shard: "*", expected: fmt.Sprintf("%s/collections/.*/shards/.*", authorization.SchemaDomain)},
+		{collection: "foo", shard: "", expected: fmt.Sprintf("%s/collections/Foo/shards/.*", authorization.SchemaDomain)},
+		{collection: "foo", shard: "*", expected: fmt.Sprintf("%s/collections/Foo/shards/.*", authorization.SchemaDomain)},
+		{collection: "", shard: "bar", expected: fmt.Sprintf("%s/collections/.*/shards/bar", authorization.SchemaDomain)},
+		{collection: "*", shard: "bar", expected: fmt.Sprintf("%s/collections/.*/shards/bar", authorization.SchemaDomain)},
+		{collection: "foo", shard: "bar", expected: fmt.Sprintf("%s/collections/Foo/shards/bar", authorization.SchemaDomain)},
+	} {
+		t.Run(fmt.Sprintf("collection: %s; shard: %s", tc.collection, tc.shard), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinSchema(tc.collection, tc.shard))
+		})
+	}
+}
+
+// Test_pObjects covers every empty/"*"/concrete combination of collection,
+// shard, and object for CasbinData.
+func Test_pObjects(t *testing.T) {
+	for _, tc := range []struct {
+		collection string
+		shard      string
+		object     string
+		expected   string
+	}{
+		{collection: "", shard: "", object: "", expected: fmt.Sprintf("%s/collections/.*/shards/.*/objects/.*", authorization.DataDomain)},
+		{collection: "*", shard: "*", object: "*", expected: fmt.Sprintf("%s/collections/.*/shards/.*/objects/.*", authorization.DataDomain)},
+		{collection: "foo", shard: "", object: "", expected: fmt.Sprintf("%s/collections/Foo/shards/.*/objects/.*", authorization.DataDomain)},
+		{collection: "foo", shard: "*", object: "*", expected: fmt.Sprintf("%s/collections/Foo/shards/.*/objects/.*", authorization.DataDomain)},
+		{collection: "", shard: "bar", object: "", expected: fmt.Sprintf("%s/collections/.*/shards/bar/objects/.*", authorization.DataDomain)},
+		{collection: "*", shard: "bar", object: "*", expected: fmt.Sprintf("%s/collections/.*/shards/bar/objects/.*", authorization.DataDomain)},
+		{collection: "", shard: "", object: "baz", expected: fmt.Sprintf("%s/collections/.*/shards/.*/objects/baz", authorization.DataDomain)},
+		{collection: "*", shard: "*", object: "baz", expected: fmt.Sprintf("%s/collections/.*/shards/.*/objects/baz", authorization.DataDomain)},
+		{collection: "foo", shard: "bar", object: "", expected: fmt.Sprintf("%s/collections/Foo/shards/bar/objects/.*", authorization.DataDomain)},
+		{collection: "foo", shard: "bar", object: "*", expected: fmt.Sprintf("%s/collections/Foo/shards/bar/objects/.*", authorization.DataDomain)},
+		{collection: "foo", shard: "", object: "baz", expected: fmt.Sprintf("%s/collections/Foo/shards/.*/objects/baz", authorization.DataDomain)},
+		{collection: "foo", shard: "*", object: "baz", expected: fmt.Sprintf("%s/collections/Foo/shards/.*/objects/baz", authorization.DataDomain)},
+		{collection: "", shard: "bar", object: "baz", expected: fmt.Sprintf("%s/collections/.*/shards/bar/objects/baz", authorization.DataDomain)},
+		{collection: "*", shard: "bar", object: "baz", expected: fmt.Sprintf("%s/collections/.*/shards/bar/objects/baz", authorization.DataDomain)},
+		{collection: "foo", shard: "bar", object: "baz", expected: fmt.Sprintf("%s/collections/Foo/shards/bar/objects/baz", authorization.DataDomain)},
+	} {
+		t.Run(fmt.Sprintf("collection: %s; shard: %s; object: %s", tc.collection, tc.shard, tc.object), func(t *testing.T) {
+			require.Equal(t, tc.expected, CasbinData(tc.collection, tc.shard, tc.object))
+		})
+	}
+}
+
+// Test_CasbinBackups checks the casbin resource path built for the backups
+// domain. NOTE(review): the field is named "backend" but it is passed as the
+// collection argument of CasbinBackups and shows up in the path as
+// /collections/<name> — presumably a leftover name; confirm and rename.
+func Test_CasbinBackups(t *testing.T) {
+	// The original table listed the same three cases twice; the duplicates
+	// added no coverage and produced colliding subtest names, so they were
+	// removed.
+	tests := []struct {
+		backend  string
+		expected string
+	}{
+		{backend: "", expected: fmt.Sprintf("%s/collections/.*", authorization.BackupsDomain)},
+		{backend: "*", expected: fmt.Sprintf("%s/collections/.*", authorization.BackupsDomain)},
+		{backend: "foo", expected: fmt.Sprintf("%s/collections/Foo", authorization.BackupsDomain)},
+	}
+	for _, tt := range tests {
+		name := fmt.Sprintf("backend: %s", tt.backend)
+		t.Run(name, func(t *testing.T) {
+			p := CasbinBackups(tt.backend)
+			require.Equal(t, tt.expected, p)
+		})
+	}
+}
+
+// Test_fromCasbinResource checks that the regex wildcard ".*" stored in a
+// casbin resource is rendered back to the user-facing "*".
+func Test_fromCasbinResource(t *testing.T) {
+	for _, tc := range []struct {
+		resource string
+		expected string
+	}{
+		{resource: "collections/.*/shards/.*/objects/.*", expected: "collections/*/shards/*/objects/*"},
+	} {
+		t.Run(fmt.Sprintf("Resource: %s", tc.resource), func(t *testing.T) {
+			require.Equal(t, tc.expected, fromCasbinResource(tc.resource))
+		})
+	}
+}
+
+func TestValidResource(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected bool
+ }{
+ {
+ name: "valid resource - users",
+ input: fmt.Sprintf("%s/testUser", authorization.UsersDomain),
+ expected: true,
+ },
+ {
+ name: "valid resource - groups",
+ input: fmt.Sprintf("%s/testGroups", authorization.GroupsDomain),
+ expected: true,
+ },
+ {
+ name: "valid resource - roles",
+ input: fmt.Sprintf("%s/testRole", authorization.RolesDomain),
+ expected: true,
+ },
+ {
+ name: "valid resource - collections",
+ input: fmt.Sprintf("%s/collections/testCollection", authorization.SchemaDomain),
+ expected: true,
+ },
+ {
+ name: "valid resource - objects",
+ input: fmt.Sprintf("%s/collections/testCollection/shards/testShard/objects/testObject", authorization.DataDomain),
+ expected: true,
+ },
+ {
+ name: "invalid resource",
+ input: "invalid/resource",
+ expected: false,
+ },
+ {
+ name: "invalid resource",
+ input: "some resource",
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := validResource(tt.input)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+func TestValidVerb(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected bool
+ }{
+ {
+ name: "valid verb - create",
+ input: "C",
+ expected: true,
+ },
+ {
+ name: "valid verb - read",
+ input: "R",
+ expected: true,
+ },
+ {
+ name: "valid verb - update",
+ input: "U",
+ expected: true,
+ },
+ {
+ name: "valid verb - delete",
+ input: "D",
+ expected: true,
+ },
+ {
+ name: "All",
+ input: "CRUD",
+ expected: true,
+ },
+ {
+ name: "invalid verb",
+ input: "X",
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := validVerb(tt.input)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/auth_calls.md b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/auth_calls.md
new file mode 100644
index 0000000000000000000000000000000000000000..6a05aafda66875c9ed47fdd7d67db36d16984525
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/auth_calls.md
@@ -0,0 +1,80 @@
+# Authorization Calls
+This document lists all authorization calls in the codebase.
+## Usage
+To regenerate this documentation, run the following commands from the repository root:
+```bash
+cd usecases/auth/authorization/docs
+go run generator.go
+```
+## Statistics
+- Total files found: 23162
+- Files processed: 1937
+- Total Authorize calls found: 79
+
+| Function | File | Verb → Resources |
+|----------|------|-----------------|
+| resolveAggregate | adapters/handlers/graphql/local/aggregate/resolver.go | READ → ShardsData |
+| authorizePath | adapters/handlers/graphql/local/common_filters/authz.go | READ → CollectionsData |
+| AuthorizeProperty | adapters/handlers/graphql/local/common_filters/authz.go | READ → CollectionsData |
+| resolveExplore | adapters/handlers/graphql/local/explore/concepts_resolver.go | READ → CollectionsData |
+| resolveGet | adapters/handlers/graphql/local/get/class_builder_fields.go | READ → ShardsData |
+| batchDelete | adapters/handlers/grpc/v1/service.go | DELETE → ShardsData |
+| batchObjects | adapters/handlers/grpc/v1/service.go | UPDATE → ShardsData<br>CREATE → ShardsData |
+| classGetterWithAuthzFunc | adapters/handlers/grpc/v1/service.go | READ → Collections |
+| createRole | adapters/handlers/rest/authz/handlers_authz.go | CREATE → Roles |
+| addPermissions | adapters/handlers/rest/authz/handlers_authz.go | UPDATE → Roles |
+| removePermissions | adapters/handlers/rest/authz/handlers_authz.go | UPDATE → Roles |
+| hasPermission | adapters/handlers/rest/authz/handlers_authz.go | READ → Roles |
+| getRoles | adapters/handlers/rest/authz/handlers_authz.go | READ → Roles |
+| getRole | adapters/handlers/rest/authz/handlers_authz.go | READ → Roles |
+| deleteRole | adapters/handlers/rest/authz/handlers_authz.go | DELETE → Roles |
+| assignRole | adapters/handlers/rest/authz/handlers_authz.go | UPDATE → Roles |
+| getRolesForUser | adapters/handlers/rest/authz/handlers_authz.go | READ → Roles |
+| getUsersForRole | adapters/handlers/rest/authz/handlers_authz.go | READ → Roles |
+| revokeRole | adapters/handlers/rest/authz/handlers_authz.go | UPDATE → Roles |
+| setupGraphQLHandlers | adapters/handlers/rest/handlers_graphql.go | READ → CollectionsMetadata, Collections |
+| Backup | usecases/backup/scheduler.go | CREATE → Backups |
+| Restore | usecases/backup/scheduler.go | CREATE → Backups |
+| Cancel | usecases/backup/scheduler.go | DELETE → Backups |
+| Schedule | usecases/classification/classifier.go | UPDATE → CollectionsMetadata |
+| validateFilter | usecases/classification/classifier.go | READ → CollectionsMetadata |
+| Get | usecases/classification/classifier.go | READ → CollectionsMetadata |
+| GetNodeStatus | usecases/nodes/handler.go | READ → Nodes |
+| GetNodeStatistics | usecases/nodes/handler.go | READ → Cluster |
+| AddObject | usecases/objects/add.go | CREATE → ShardsData<br>READ → CollectionsMetadata |
+| autoSchema | usecases/objects/auto_schema.go | CREATE → CollectionsMetadata<br>UPDATE → CollectionsMetadata |
+| AddObjects | usecases/objects/batch_add.go | UPDATE → ShardsData<br>CREATE → ShardsData |
+| DeleteObjects | usecases/objects/batch_delete.go | DELETE → ShardsData |
+| classGetterFunc | usecases/objects/batch_delete.go | READ → Collections |
+| AddReferences | usecases/objects/batch_references_add.go | UPDATE → pathsData<br>READ → pathsMetadata |
+| addReferences | usecases/objects/batch_references_add.go | READ → shardsDataPaths |
+| DeleteObject | usecases/objects/delete.go | READ → CollectionsMetadata<br>DELETE → Objects |
+| GetObject | usecases/objects/get.go | READ → Objects |
+| GetObjects | usecases/objects/get.go | READ → Objects |
+| GetObjectsClass | usecases/objects/get.go | READ → Objects |
+| HeadObject | usecases/objects/head.go | READ → Objects, CollectionsMetadata |
+| MergeObject | usecases/objects/merge.go | UPDATE → Objects<br>READ → CollectionsMetadata |
+| Query | usecases/objects/query.go | READ → CollectionsMetadata |
+| AddObjectReference | usecases/objects/references_add.go | UPDATE → ShardsData<br>READ → CollectionsMetadata, Collections |
+| DeleteObjectReference | usecases/objects/references_delete.go | UPDATE → ShardsData<br>READ → CollectionsMetadata, CollectionsData |
+| UpdateObjectReferences | usecases/objects/references_update.go | UPDATE → ShardsData<br>READ → CollectionsMetadata, Collections |
+| UpdateObject | usecases/objects/update.go | UPDATE → Objects<br>READ → CollectionsMetadata |
+| ValidateObject | usecases/objects/validate.go | READ → Objects |
+| GetClass | usecases/schema/class.go | READ → CollectionsMetadata |
+| GetConsistentClass | usecases/schema/class.go | READ → CollectionsMetadata |
+| GetCachedClass | usecases/schema/class.go | READ → CollectionsMetadata |
+| AddClass | usecases/schema/class.go | CREATE → CollectionsMetadata<br>READ → CollectionsMetadata |
+| DeleteClass | usecases/schema/class.go | DELETE → CollectionsMetadata<br>READ → CollectionsMetadata |
+| UpdateClass | usecases/schema/class.go | UPDATE → CollectionsMetadata |
+| GetSchema | usecases/schema/handler.go | READ → CollectionsMetadata |
+| GetConsistentSchema | usecases/schema/handler.go | READ → CollectionsMetadata |
+| UpdateShardStatus | usecases/schema/handler.go | UPDATE → ShardsMetadata |
+| ShardsStatus | usecases/schema/handler.go | READ → ShardsMetadata |
+| AddClassProperty | usecases/schema/property.go | UPDATE → CollectionsMetadata<br>READ → CollectionsMetadata |
+| DeleteClassProperty | usecases/schema/property.go | UPDATE → CollectionsMetadata |
+| AddTenants | usecases/schema/tenant.go | CREATE → ShardsMetadata |
+| UpdateTenants | usecases/schema/tenant.go | UPDATE → ShardsMetadata |
+| DeleteTenants | usecases/schema/tenant.go | DELETE → ShardsMetadata |
+| GetConsistentTenants | usecases/schema/tenant.go | READ → ShardsMetadata |
+| ConsistentTenantExists | usecases/schema/tenant.go | READ → ShardsMetadata |
+| validateFilters | usecases/traverser/traverser_get.go | READ → CollectionsMetadata |
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/generator.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/generator.go
new file mode 100644
index 0000000000000000000000000000000000000000..fdd803f9384c5b9e163e21f477f533bbbc4c17e1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/docs/generator.go
@@ -0,0 +1,257 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// authCall records the Authorize call sites found in one function of one
+// file, grouped by verb.
+type authCall struct {
+	// Function is the name of the function containing the call(s).
+	Function string
+	// Map verbs to their resources
+	VerbResourceMap map[string][]string // key: verb, value: slice of resources
+	// FilePath is the walked path the call was found in.
+	FilePath string
+}
+
+// mainDirPath is the repository root relative to this docs directory;
+// it is both the walk root and the prefix stripped from reported paths.
+const mainDirPath = "../../../../"
+
+// main walks the repository, finds every `.Authorize(...)` call site with
+// the go/ast package, and writes a markdown summary table to auth_calls.md.
+// Progress and statistics go to stderr so stdout stays clean.
+func main() {
+	var calls []authCall
+	var totalFiles, skippedFiles, processedFiles int
+
+	err := filepath.Walk(mainDirPath, func(path string, info os.FileInfo, err error) error {
+		totalFiles++
+
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error accessing path %s: %v\n", path, err)
+			return nil
+		}
+
+		// Skip directories
+		if info.IsDir() {
+			if shouldSkipDir(path) {
+				fmt.Fprintf(os.Stderr, "Skipping directory: %s\n", path)
+				return filepath.SkipDir
+			}
+			return nil
+		}
+
+		// Skip non-go files, vendor, and test files
+		if !strings.HasSuffix(path, ".go") ||
+			strings.Contains(path, "/vendor/") ||
+			strings.HasSuffix(path, "_test.go") {
+			skippedFiles++
+			return nil
+		}
+
+		fmt.Fprintf(os.Stderr, "Processing file: %s\n", path)
+		processedFiles++
+
+		// Read and parse the file
+		fset := token.NewFileSet()
+		content, err := os.ReadFile(path)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error reading %s: %v\n", path, err)
+			return nil
+		}
+
+		node, err := parser.ParseFile(fset, path, content, 0)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error parsing %s: %v\n", path, err)
+			return nil
+		}
+
+		// Visit all nodes in the AST
+		ast.Inspect(node, func(n ast.Node) bool {
+			if call, ok := n.(*ast.CallExpr); ok {
+				if sel, ok := call.Fun.(*ast.SelectorExpr); ok {
+					if sel.Sel.Name == "Authorize" {
+						// Find the containing function by position range
+						var funcName string
+						ast.Inspect(node, func(parent ast.Node) bool {
+							if fn, ok := parent.(*ast.FuncDecl); ok {
+								if fn.Pos() <= call.Pos() && call.End() <= fn.End() {
+									// Skip test functions
+									if !strings.HasPrefix(fn.Name.Name, "Test") {
+										funcName = fn.Name.Name
+									}
+									return false
+								}
+							}
+							return true
+						})
+
+						// Skip if no valid function name (e.g., test function)
+						if funcName == "" {
+							return true
+						}
+
+						// Authorize(ctx/principal, verb, resource, ...):
+						// args[1] is the verb, args[2] the resource.
+						if len(call.Args) >= 3 {
+							verb := formatArg(call.Args[1])
+							resource := formatArg(call.Args[2])
+
+							verb = strings.TrimPrefix(verb, "&")
+							verb = strings.TrimPrefix(verb, "authorization.")
+							resource = strings.TrimPrefix(resource, "authorization.")
+
+							// Check if we already have an entry for this function+file
+							if idx, found := findOrCreateCall(calls, funcName, path); found {
+								// Initialize map if needed
+								if calls[idx].VerbResourceMap == nil {
+									calls[idx].VerbResourceMap = make(map[string][]string)
+								}
+								// Add resource to verb's resource list if not already present
+								if !contains(calls[idx].VerbResourceMap[verb], resource) {
+									calls[idx].VerbResourceMap[verb] = append(calls[idx].VerbResourceMap[verb], resource)
+								}
+							} else {
+								verbMap := make(map[string][]string)
+								verbMap[verb] = []string{resource}
+								calls = append(calls, authCall{
+									Function:        funcName,
+									VerbResourceMap: verbMap,
+									FilePath:        path,
+								})
+							}
+						}
+					}
+				}
+			}
+			return true
+		})
+		return nil
+	})
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error walking directory: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Print statistics
+	fmt.Fprintf(os.Stderr, "\nStatistics:\n")
+	fmt.Fprintf(os.Stderr, "Total files found: %d\n", totalFiles)
+	fmt.Fprintf(os.Stderr, "Files skipped: %d\n", skippedFiles)
+	fmt.Fprintf(os.Stderr, "Files processed: %d\n", processedFiles)
+
+	// Count total auth calls by summing the length of VerbResourceMap for each function
+	totalCalls := 0
+	for _, call := range calls {
+		totalCalls += len(call.VerbResourceMap)
+	}
+	fmt.Fprintf(os.Stderr, "Total Authorize calls found: %d\n", totalCalls)
+
+	// Create and write to the markdown file
+	f, err := os.Create("auth_calls.md")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error creating file: %v\n", err)
+		os.Exit(1)
+	}
+	defer f.Close()
+
+	// Write the table header
+	fmt.Fprintln(f, "# Authorization Calls")
+	fmt.Fprintln(f, "This document lists all authorization calls in the codebase.")
+
+	// Add usage section
+	fmt.Fprintln(f, "## Usage")
+	fmt.Fprintln(f, "To regenerate this documentation, run the following commands from the repository root:")
+	fmt.Fprintln(f, "```bash")
+	fmt.Fprintln(f, "cd usecases/auth/authorization/docs")
+	fmt.Fprintln(f, "go run generator.go")
+	fmt.Fprintln(f, "```")
+
+	// Continue with statistics section
+	fmt.Fprintln(f, "## Statistics")
+	fmt.Fprintf(f, "- Total files found: %d\n", totalFiles)
+	fmt.Fprintf(f, "- Files processed: %d\n", processedFiles)
+	fmt.Fprintf(f, "- Total Authorize calls found: %d\n\n", totalCalls)
+
+	fmt.Fprintln(f, "| Function | File | Verb → Resources |")
+	fmt.Fprintln(f, "|----------|------|-----------------|")
+
+	// Write each call in table format
+	for _, call := range calls {
+		// Sort the verbs so regenerated output is deterministic (map
+		// iteration order is random).
+		verbs := make([]string, 0, len(call.VerbResourceMap))
+		for verb := range call.VerbResourceMap {
+			verbs = append(verbs, verb)
+		}
+		sort.Strings(verbs)
+
+		mappings := make([]string, 0, len(verbs))
+		for _, verb := range verbs {
+			mappings = append(mappings, fmt.Sprintf("%s → %s", verb, strings.Join(call.VerbResourceMap[verb], ", ")))
+		}
+		fmt.Fprintf(f, "| %s | %s | %s |\n",
+			call.Function,
+			strings.TrimPrefix(call.FilePath, mainDirPath),
+			// BUGFIX: join with "<br>" instead of a raw newline. A newline
+			// inside an interpreted string literal is invalid Go, and it
+			// also broke the generated markdown table rows across lines.
+			strings.Join(mappings, "<br>"),
+		)
+	}
+
+	fmt.Fprintf(os.Stderr, "Results written to auth_calls.md\n")
+}
+
+// shouldSkipDir reports whether the walker should skip descending into
+// path, based on the directory's base name.
+func shouldSkipDir(path string) bool {
+	switch filepath.Base(path) {
+	case ".git", "vendor", "node_modules", "dist", "build":
+		return true
+	}
+	return false
+}
+
+// formatArg renders a call-argument expression as compact source-like text:
+// identifiers by name, pkg.Name selectors and pkg.Fn(...) calls as
+// "pkg.Name"; anything else falls through to fmt's default rendering.
+func formatArg(expr ast.Expr) string {
+	switch v := expr.(type) {
+	case *ast.Ident:
+		return v.Name
+	case *ast.SelectorExpr:
+		if ident, ok := v.X.(*ast.Ident); ok {
+			return ident.Name + "." + v.Sel.Name
+		}
+	case *ast.CallExpr:
+		if sel, ok := v.Fun.(*ast.SelectorExpr); ok {
+			if ident, ok := sel.X.(*ast.Ident); ok {
+				return ident.Name + "." + sel.Sel.Name
+			}
+		}
+	}
+	return fmt.Sprintf("%v", expr)
+}
+
+// findOrCreateCall looks up an existing entry for the (function, filePath)
+// pair; it returns its index and true when found, otherwise 0 and false.
+// (Despite the name, it never creates anything — the caller appends.)
+func findOrCreateCall(calls []authCall, function, filePath string) (int, bool) {
+	for i := range calls {
+		if calls[i].Function == function && calls[i].FilePath == filePath {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
+// contains reports whether item occurs in slice.
+func contains(slice []string, item string) bool {
+	for i := range slice {
+		if slice[i] == item {
+			return true
+		}
+	}
+	return false
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..38a5f2fa0aa515efb1bce231a7538ce690b4206e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors.go
@@ -0,0 +1,69 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package errors
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/weaviate/weaviate/entities/models"
+)
+
// Forbidden indicates a failed authorization
type Forbidden struct {
	principal *models.Principal // the user (and their group memberships) that was denied
	verb      string            // the attempted action
	resources []string          // the resource paths the action targeted
}
+
// Unauthenticated indicates that no authenticated user is associated with a
// request (as opposed to Forbidden, where the user is known but lacks
// permission).
type Unauthenticated struct{}

// Error implements the error interface with a fixed message.
func (u Unauthenticated) Error() string {
	return "user is not authenticated"
}

// NewUnauthenticated creates an explicit Unauthenticated error
func NewUnauthenticated() Unauthenticated {
	return Unauthenticated{}
}
+
// NewForbidden creates an explicit Forbidden error with details about the
// principal and the attempted access on a specific resource
//
// The variadic resources are stored as given and rendered verbatim as a Go
// slice (e.g. "[schema/things]") in the resulting error message.
func NewForbidden(principal *models.Principal, verb string, resources ...string) Forbidden {
	return Forbidden{
		principal: principal,
		verb:      verb,
		resources: resources,
	}
}
+
+func (f Forbidden) Error() string {
+ optionalGroups := ""
+ if len(f.principal.Groups) == 1 {
+ optionalGroups = fmt.Sprintf(" (of group '%s')", f.principal.Groups[0])
+ } else if len(f.principal.Groups) > 1 {
+ groups := wrapInSingleQuotes(f.principal.Groups)
+ groupsList := strings.Join(groups, ", ")
+ optionalGroups = fmt.Sprintf(" (of groups %s)", groupsList)
+ }
+
+ return fmt.Sprintf("authorization, forbidden action: user '%s'%s has insufficient permissions to %s %s",
+ f.principal.Username, optionalGroups, f.verb, f.resources)
+}
+
// wrapInSingleQuotes returns a new slice in which every element of input is
// wrapped in single quotes.
//
// It deliberately does NOT mutate input: Forbidden.Error passes
// f.principal.Groups here, and the previous in-place version corrupted the
// principal's group names (and double-quoted them on repeated Error calls).
func wrapInSingleQuotes(input []string) []string {
	quoted := make([]string, len(input))
	for i, s := range input {
		quoted[i] = fmt.Sprintf("'%s'", s)
	}

	return quoted
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..facc4323977acd0ef278508d26b14258f28ad8b4
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/errors/errors_test.go
@@ -0,0 +1,52 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package errors
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaviate/weaviate/entities/models"
+)
+
+func Test_ForbiddenError_NoGroups(t *testing.T) {
+ principal := &models.Principal{
+ Username: "john",
+ }
+
+ err := NewForbidden(principal, "delete", "schema/things")
+ expectedErrMsg := "authorization, forbidden action: user 'john' has insufficient permissions to delete [schema/things]"
+ assert.Equal(t, expectedErrMsg, err.Error())
+}
+
+func Test_ForbiddenError_SingleGroup(t *testing.T) {
+ principal := &models.Principal{
+ Username: "john",
+ Groups: []string{"worstusers"},
+ }
+
+ err := NewForbidden(principal, "delete", "schema/things")
+ expectedErrMsg := "authorization, forbidden action: user 'john' (of group 'worstusers') has insufficient permissions to delete [schema/things]"
+ assert.Equal(t, expectedErrMsg, err.Error())
+}
+
+func Test_ForbiddenError_MultipleGroups(t *testing.T) {
+ principal := &models.Principal{
+ Username: "john",
+ Groups: []string{"worstusers", "fraudsters", "evilpeople"},
+ }
+
+ err := NewForbidden(principal, "delete", "schema/things")
+ expectedErrMsg := "authorization, forbidden action: user 'john' (of groups 'worstusers', 'fraudsters', 'evilpeople') " +
+ "has insufficient permissions to delete [schema/things]"
+ assert.Equal(t, expectedErrMsg, err.Error())
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8e59ada7e526926223a8bb015d087510c83d676
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter.go
@@ -0,0 +1,122 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package filter
+
+import (
+ "context"
+ "slices"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+)
+
// ResourceFilter handles filtering resources based on authorization
type ResourceFilter[T any] struct {
	authorizer authorization.Authorizer // backend used for permission checks
	config     rbacconf.Config          // Enabled switches between single-check and per-item filtering
}
+
+func New[T any](authorizer authorization.Authorizer, config rbacconf.Config) *ResourceFilter[T] {
+ return &ResourceFilter[T]{
+ authorizer: authorizer,
+ config: config,
+ }
+}
+
// FilterFn defines a function that generates authorization resources for an
// item, i.e. it maps one item to the resource path used for its permission check.
type FilterFn[T any] func(item T) string
+
+// Filter filters a slice of items based on authorization
+func (f *ResourceFilter[T]) Filter(
+ ctx context.Context,
+ logger logrus.FieldLogger,
+ principal *models.Principal,
+ items []T,
+ verb string,
+ resourceFn FilterFn[T],
+) []T {
+ if len(items) == 0 {
+ return items
+ }
+ if !f.config.Enabled {
+ // here it's either you have the permissions or not so 1 check is enough
+ if err := f.authorizer.Authorize(ctx, principal, verb, resourceFn(items[0])); err != nil {
+ logger.WithFields(logrus.Fields{
+ "username": principal.Username,
+ "verb": verb,
+ "resources": items,
+ }).Error(err)
+ return nil
+ }
+ return items
+ }
+
+ // For RBAC, first check if all items have the same parent resource
+ firstResource := resourceFn(items[0])
+ allSameParent := true
+
+ for i := 1; i < len(items); i++ {
+ if authorization.WildcardPath(resourceFn(items[i])) != authorization.WildcardPath(firstResource) {
+ allSameParent = false
+ }
+ }
+
+ // If all items have the same parent, we can do a single authorization check
+ if allSameParent {
+ err := f.authorizer.Authorize(ctx, principal, verb, authorization.WildcardPath(firstResource))
+ if err != nil {
+ logger.WithFields(logrus.Fields{
+ "username": principal.Username,
+ "verb": verb,
+ "resource": authorization.WildcardPath(firstResource),
+ }).Error(err)
+ }
+
+ if err == nil {
+ // user is authorized
+ return items
+ }
+ }
+
+ // For RBAC, filter based on per-item authorization
+ filtered := make([]T, 0, len(items))
+ resources := make([]string, 0, len(items))
+ for _, item := range items {
+ resources = append(resources, resourceFn(item))
+ }
+
+ allowedList, err := f.authorizer.FilterAuthorizedResources(ctx, principal, verb, resources...)
+ if err != nil {
+ logger.WithFields(logrus.Fields{
+ "username": principal.Username,
+ "verb": verb,
+ "resources": resources,
+ }).Error(err)
+ }
+
+ if len(allowedList) == len(resources) {
+ // has permissions to all
+ return items
+ }
+
+ for _, item := range items {
+ if slices.Contains(allowedList, resourceFn(item)) {
+ filtered = append(filtered, item)
+ }
+ }
+
+ return filtered
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b97258fb9f827f56bae11810b961fa1445f6542
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/filter/filter_test.go
@@ -0,0 +1,64 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package filter
+
+import (
+ "context"
+ "testing"
+
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/mocks"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
+)
+
+func TestFilter(t *testing.T) {
+ tests := []struct {
+ Name string
+ Config rbacconf.Config
+ Items []*models.Object
+ }{
+ {
+ Name: "rbac enabled, no objects",
+ Items: []*models.Object{},
+ Config: rbacconf.Config{Enabled: true},
+ },
+ {
+ Name: "rbac disenabled, no objects",
+ Items: []*models.Object{},
+ Config: rbacconf.Config{Enabled: false},
+ },
+ }
+
+ l, _ := test.NewNullLogger()
+
+ authorizer := mocks.NewMockAuthorizer()
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ resourceFilter := New[*models.Object](authorizer, tt.Config)
+ filteredObjects := resourceFilter.Filter(
+ context.Background(),
+ l,
+ &models.Principal{Username: "user"},
+ tt.Items,
+ authorization.READ,
+ func(obj *models.Object) string {
+ return ""
+ },
+ )
+
+ require.Equal(t, len(tt.Items), len(filteredObjects))
+ })
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/mocks/authorizer.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/mocks/authorizer.go
new file mode 100644
index 0000000000000000000000000000000000000000..248bf2e2377eaf0bfb6f48b5c46a6481549da532
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/mocks/authorizer.go
@@ -0,0 +1,61 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package mocks
+
+import (
+ "context"
+
+ models "github.com/weaviate/weaviate/entities/models"
+)
+
// AuthZReq records a single authorization request as seen by the fake:
// who asked, which verb, and for which resources.
type AuthZReq struct {
	Principal *models.Principal
	Verb      string
	Resources []string
}

// FakeAuthorizer is a test double for authorization checks. It records every
// request it receives and can be primed with a fixed error to simulate denial.
type FakeAuthorizer struct {
	err      error      // when non-nil, returned by every Authorize call
	requests []AuthZReq // all requests seen, in call order
}

// NewMockAuthorizer returns a FakeAuthorizer that allows everything until
// SetErr is called.
func NewMockAuthorizer() *FakeAuthorizer {
	return &FakeAuthorizer{}
}

// SetErr primes the fake so that all subsequent authorization calls fail
// with err.
func (a *FakeAuthorizer) SetErr(err error) {
	a.err = err
}
+
+// Authorize provides a mock function with given fields: principal, verb, resource
+func (a *FakeAuthorizer) Authorize(ctx context.Context, principal *models.Principal, verb string, resources ...string) error {
+ a.requests = append(a.requests, AuthZReq{principal, verb, resources})
+ if a.err != nil {
+ return a.err
+ }
+ return nil
+}
+
// AuthorizeSilent behaves exactly like Authorize; the fake does not
// distinguish audited from silent authorization, and the request is recorded
// either way.
func (a *FakeAuthorizer) AuthorizeSilent(ctx context.Context, principal *models.Principal, verb string, resources ...string) error {
	return a.Authorize(ctx, principal, verb, resources...)
}
+
+func (a *FakeAuthorizer) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, verb string, resources ...string) ([]string, error) {
+ if err := a.Authorize(ctx, principal, verb, resources...); err != nil {
+ return nil, err
+ }
+ return resources, nil
+}
+
// Calls returns every authorization request recorded so far, in call order.
// Note: the returned slice is the fake's internal backing slice, not a copy.
func (a *FakeAuthorizer) Calls() []AuthZReq {
	return a.requests
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer.go
new file mode 100644
index 0000000000000000000000000000000000000000..fdfd15ed806699dd43bb33cfaf23b28fccce0024
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer.go
@@ -0,0 +1,171 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/errors"
+)
+
// AuditLogVersion is the schema version tag attached to every RBAC audit log
// entry (emitted as the "rbac_log_version" field) so log consumers can detect
// format changes.
const AuditLogVersion = 2
+
+func (m *Manager) authorize(ctx context.Context, principal *models.Principal, verb string, skipAudit bool, resources ...string) error {
+ if principal == nil {
+ return fmt.Errorf("rbac: %w", errors.NewUnauthenticated())
+ }
+
+ if len(resources) == 0 {
+ return fmt.Errorf("at least 1 resource is required")
+ }
+
+ logger := m.logger.WithFields(logrus.Fields{
+ "action": "authorize",
+ "user": principal.Username,
+ "component": authorization.ComponentName,
+ "request_action": verb,
+ "rbac_log_version": AuditLogVersion,
+ })
+ if !m.rbacConf.IpInAuditDisabled {
+ sourceIp := ctx.Value("sourceIp")
+ logger = logger.WithField("source_ip", sourceIp)
+ }
+
+ if len(principal.Groups) > 0 {
+ logger = logger.WithField("groups", principal.Groups)
+ }
+
+ // Create a map to aggregate resources and their counts while preserving order
+ resourceCounts := make(map[string]int)
+ var uniqueResources []string
+ for _, resource := range resources {
+ if _, exists := resourceCounts[resource]; !exists {
+ uniqueResources = append(uniqueResources, resource)
+ }
+ resourceCounts[resource]++
+ }
+ permResults := make([]logrus.Fields, 0, len(uniqueResources))
+
+ for _, resource := range uniqueResources {
+ allowed, err := m.checkPermissions(principal, resource, verb)
+ if err != nil {
+ logger.WithFields(logrus.Fields{
+ "resource": resource,
+ }).WithError(err).Error("failed to enforce policy")
+ return err
+ }
+
+ perm, err := conv.PathToPermission(verb, resource)
+ if err != nil {
+ return fmt.Errorf("rbac: %w", err)
+ }
+
+ if allowed {
+ permResults = append(permResults, logrus.Fields{
+ "resource": prettyPermissionsResources(perm),
+ "results": prettyStatus(allowed),
+ })
+ }
+
+ if !allowed {
+ if !skipAudit {
+ logger.WithField("permissions", permResults).Error("authorization denied")
+ }
+ return fmt.Errorf("rbac: %w", errors.NewForbidden(principal, prettyPermissionsActions(perm), prettyPermissionsResources(perm)))
+ }
+ }
+
+ // Log all results at once if audit is enabled
+ if !skipAudit {
+ logger.WithField("permissions", permResults).Info()
+ }
+
+ return nil
+}
+
// Authorize verify if the user has access to a resource to do specific action.
// This is the audited variant: grants and denials are written to the audit log.
func (m *Manager) Authorize(ctx context.Context, principal *models.Principal, verb string, resources ...string) error {
	return m.authorize(ctx, principal, verb, false, resources...)
}
+
// AuthorizeSilent verify if the user has access to a resource to do specific action without audit logs
// to be used internally.
// NOTE(review): skipAudit only suppresses the per-request audit entries;
// enforcement failures are still logged by authorize — confirm this is intended.
func (m *Manager) AuthorizeSilent(ctx context.Context, principal *models.Principal, verb string, resources ...string) error {
	return m.authorize(ctx, principal, verb, true, resources...)
}
+
+// FilterAuthorizedResources authorize the passed resources with best effort approach, it will return
+// list of allowed resources, if none, it will return an empty slice
+func (m *Manager) FilterAuthorizedResources(ctx context.Context, principal *models.Principal, verb string, resources ...string) ([]string, error) {
+ if principal == nil {
+ return nil, errors.NewUnauthenticated()
+ }
+
+ if len(resources) == 0 {
+ return nil, fmt.Errorf("at least 1 resource is required")
+ }
+
+ logger := m.logger.WithFields(logrus.Fields{
+ "action": "authorize",
+ "user": principal.Username,
+ "component": authorization.ComponentName,
+ "request_action": verb,
+ })
+
+ if len(principal.Groups) > 0 {
+ logger = logger.WithField("groups", principal.Groups)
+ }
+
+ allowedResources := make([]string, 0, len(resources))
+
+ // Create a map to aggregate resources and their counts while preserving order
+ resourceCounts := make(map[string]int)
+ var uniqueResources []string
+ for _, resource := range resources {
+ if _, exists := resourceCounts[resource]; !exists {
+ uniqueResources = append(uniqueResources, resource)
+ }
+ resourceCounts[resource]++
+ }
+
+ permResults := make([]logrus.Fields, 0, len(uniqueResources))
+
+ for _, resource := range uniqueResources {
+ allowed, err := m.checkPermissions(principal, resource, verb)
+ if err != nil {
+ logger.WithError(err).WithField("resource", resource).Error("failed to enforce policy")
+ return nil, err
+ }
+
+ if allowed {
+ perm, err := conv.PathToPermission(verb, resource)
+ if err != nil {
+ return nil, err
+ }
+
+ permResults = append(permResults, logrus.Fields{
+ "resource": prettyPermissionsResources(perm),
+ "results": prettyStatus(allowed),
+ })
+ allowedResources = append(allowedResources, resource)
+ }
+ }
+
+ logger.WithField("permissions", permResults).Info()
+ return allowedResources, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ff5b8b14db6fa92e9d58277f8e3610b43f9d7f51
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/authorizer_test.go
@@ -0,0 +1,640 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+ authzErrors "github.com/weaviate/weaviate/usecases/auth/authorization/errors"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
// TestAuthorize table-tests Manager.authorize end to end against a real
// casbin enforcer: principal validation, allow/deny decisions, group-based
// roles, partial authorization across multiple resources, and the contents
// of the audit log entries (including suppression via skipAudit).
func TestAuthorize(t *testing.T) {
	tests := []struct {
		name          string
		principal     *models.Principal
		verb          string
		resources     []string
		skipAudit     bool
		setupPolicies func(*Manager) error // optional casbin policy/role seeding per case
		wantErr       bool
		errContains   string
	}{
		{
			name:      "nil principal returns unauthenticated error",
			principal: nil,
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test"),
			wantErr:   true,
		},
		{
			name: "empty resources returns error",
			principal: &models.Principal{
				Username: "test-user",
				Groups:   []string{},
			},
			verb:        authorization.READ,
			resources:   []string{},
			wantErr:     true,
			errContains: "at least 1 resource is required",
		},
		{
			name: "authorized user with correct permissions",
			principal: &models.Principal{
				Username: "admin-user",
				Groups:   []string{"admin-group"},
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2"),
			setupPolicies: func(m *Manager) error {
				// NOTE(review): the trailing argument order here (domain, verb)
				// differs from the other cases below (verb, domain) — confirm
				// which order the casbin model actually expects.
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.SchemaDomain, authorization.READ)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("admin-user", authentication.AuthTypeDb),
					conv.PrefixRoleName("admin"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
		},
		{
			name: "unauthorized user returns forbidden error",
			principal: &models.Principal{
				Username: "regular-user",
				Groups:   []string{},
			},
			verb:        authorization.UPDATE,
			resources:   authorization.CollectionsMetadata("Test1"),
			wantErr:     true,
			errContains: "forbidden",
		},
		{
			// only Test1 is granted; the denial on Test2 must fail the whole call
			name: "partial authorization fails completely",
			principal: &models.Principal{
				Username: "partial-user",
				Groups:   []string{},
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2"),
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("partial"), authorization.CollectionsMetadata("Test1")[0], authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("partial-user", authentication.AuthTypeDb),
					conv.PrefixRoleName("partial"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
			wantErr:     true,
			errContains: "Test2",
		},
		{
			// the role is bound to the group, not to the user directly
			name: "group-based authorization",
			principal: &models.Principal{
				Username: "group-user",
				Groups:   []string{"authorized-group"},
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1"),
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("group-role"), authorization.CollectionsMetadata("Test1")[0], authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.PrefixGroupName("authorized-group"),
					conv.PrefixRoleName("group-role"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for group")
				}
				return nil
			},
		},
		{
			name: "audit logging can be skipped",
			principal: &models.Principal{
				Username: "audit-test-user",
				Groups:   []string{},
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1"),
			skipAudit: true,
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("audit-role"), authorization.CollectionsMetadata("Test1")[0], authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("audit-test-user", authentication.AuthTypeDb),
					conv.PrefixRoleName("audit-role"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup logger with hook for testing
			logger, hook := test.NewNullLogger()
			m, err := setupTestManager(t, logger)
			require.NoError(t, err)

			// Setup policies if needed
			if tt.setupPolicies != nil {
				err := tt.setupPolicies(m)
				require.NoError(t, err)
			}

			// Execute
			err = m.authorize(context.Background(), tt.principal, tt.verb, tt.skipAudit, tt.resources...)

			// Assert error conditions
			if tt.wantErr {
				require.Error(t, err)
				if tt.errContains != "" {
					assert.Contains(t, err.Error(), tt.errContains)
				}
				return
			}

			require.NoError(t, err)

			// Verify logging behavior
			if !tt.skipAudit {
				require.NotEmpty(t, hook.AllEntries())
				lastEntry := hook.LastEntry()
				require.NotNil(t, lastEntry)

				// Verify log fields
				assert.Equal(t, "authorize", lastEntry.Data["action"])
				assert.Equal(t, tt.principal.Username, lastEntry.Data["user"])
				assert.Equal(t, authorization.ComponentName, lastEntry.Data["component"])
				assert.Equal(t, tt.verb, lastEntry.Data["request_action"])

				if len(tt.principal.Groups) > 0 {
					assert.Contains(t, lastEntry.Data, "groups")
					assert.ElementsMatch(t, tt.principal.Groups, lastEntry.Data["groups"])
				}
			} else {
				// Verify no info logs when audit is skipped
				for _, entry := range hook.AllEntries() {
					assert.NotEqual(t, logrus.InfoLevel, entry.Level)
				}
			}
		})
	}
}
+
// TestFilterAuthorizedResources table-tests the best-effort filtering path:
// wildcard and specific grants, multiple roles per user, principals with no
// permissions (empty result, no error), and the input-validation errors.
func TestFilterAuthorizedResources(t *testing.T) {
	tests := []struct {
		name          string
		principal     *models.Principal
		verb          string
		resources     []string
		setupPolicies func(*Manager) error // optional casbin policy/role seeding per case
		wantResources []string
		wantErr       bool
		errType       error // compared by message substring, not by type
	}{
		{
			name:      "nil principal returns unauthenticated error",
			principal: nil,
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test"),
			wantErr:   true,
			errType:   authzErrors.Unauthenticated{},
		},
		{
			name: "wildcard permission allows all resources",
			principal: &models.Principal{
				Username: "admin-user",
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2"),
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"),
					"*", authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("admin-user", authentication.AuthTypeDb),
					conv.PrefixRoleName("admin"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
			wantResources: authorization.CollectionsMetadata("Test1", "Test2"),
		},
		{
			name: "specific permission allows only matching resource",
			principal: &models.Principal{
				Username: "limited-user",
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2"),
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("limited"),
					authorization.CollectionsMetadata("Test1")[0], authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("limited-user", authentication.AuthTypeDb),
					conv.PrefixRoleName("limited"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
			wantResources: authorization.CollectionsMetadata("Test1"),
		},
		{
			// no policies at all: best-effort filtering returns empty, not an error
			name: "no permissions returns empty list",
			principal: &models.Principal{
				Username: "no-perm-user",
			},
			verb:          authorization.READ,
			resources:     authorization.CollectionsMetadata("Test1", "Test2"),
			wantResources: []string{},
		},
		{
			name: "wildcard collection permission allows all collections",
			principal: &models.Principal{
				Username: "collections-admin",
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2"),
			setupPolicies: func(m *Manager) error {
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("collections-admin"),
					authorization.CollectionsMetadata()[0], authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("collections-admin", authentication.AuthTypeDb),
					conv.PrefixRoleName("collections-admin"))
				if err != nil {
					return err
				}
				if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
			wantResources: authorization.CollectionsMetadata("Test1", "Test2"),
		},
		{
			name: "empty resources list returns empty result",
			principal: &models.Principal{
				Username: "test-user",
			},
			verb:          authorization.READ,
			resources:     []string{},
			wantResources: []string{},
			wantErr:       true,
			errType:       fmt.Errorf("at least 1 resource is required"),
		},
		{
			// two roles each granting one collection; Test3 has no grant
			name: "user with multiple roles",
			principal: &models.Principal{
				Username: "multi-role-user",
				UserType: models.UserTypeInputDb,
			},
			verb:      authorization.READ,
			resources: authorization.CollectionsMetadata("Test1", "Test2", "Test3"),
			setupPolicies: func(m *Manager) error {
				if _, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("role1"), authorization.CollectionsMetadata("Test1")[0], authorization.READ, authorization.SchemaDomain); err != nil {
					return err
				}
				if _, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("role2"), authorization.CollectionsMetadata("Test2")[0], authorization.READ, authorization.SchemaDomain); err != nil {
					return err
				}
				if ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("multi-role-user", authentication.AuthTypeDb), conv.PrefixRoleName("role1")); err != nil {
					return err
				} else if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				if ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("multi-role-user", authentication.AuthTypeDb), conv.PrefixRoleName("role2")); err != nil {
					return err
				} else if !ok {
					return fmt.Errorf("failed to add role for user")
				}
				return nil
			},
			wantResources: authorization.CollectionsMetadata("Test1", "Test2"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup
			logger, _ := test.NewNullLogger()
			m, err := setupTestManager(t, logger)
			require.NoError(t, err)

			// Setup policies if needed
			if tt.setupPolicies != nil {
				err := tt.setupPolicies(m)
				require.NoError(t, err)
			}

			// Execute
			got, err := m.FilterAuthorizedResources(context.Background(), tt.principal, tt.verb, tt.resources...)

			// Assert
			if tt.wantErr {
				require.Error(t, err)
				if tt.errType != nil {
					assert.Contains(t, err.Error(), tt.errType.Error())
				}
				return
			}

			require.NoError(t, err)
			assert.ElementsMatch(t, tt.wantResources, got)
		})
	}
}
+
// TestFilterAuthorizedResourcesLogging asserts the structure of the audit log
// entry produced by FilterAuthorizedResources: per-resource permission fields
// ("resource"/"results") plus the standard action/user/groups/component/verb
// fields, for a user holding an all-wildcard role.
func TestFilterAuthorizedResourcesLogging(t *testing.T) {
	logger, hook := test.NewNullLogger()
	m, err := setupTestManager(t, logger)
	require.NoError(t, err)

	principal := &models.Principal{
		Username: "test-user",
		Groups:   []string{"group1"},
		UserType: models.UserTypeInputDb,
	}

	testResources := authorization.CollectionsMetadata("Test1", "Test2")

	// Setup a policy
	_, err = m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", "*", authorization.RolesDomain)
	require.NoError(t, err)
	_, err = m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("test-user", authentication.AuthTypeDb), conv.PrefixRoleName("admin"))
	require.NoError(t, err)

	// Call the function
	allowedResources, err := m.FilterAuthorizedResources(context.Background(), principal, authorization.READ, testResources...)
	require.NoError(t, err)

	// Verify logging
	require.NotEmpty(t, hook.AllEntries())
	entry := hook.LastEntry()
	require.NotNil(t, entry)

	// Check the permissions array exists and has the correct structure
	permissions, ok := entry.Data["permissions"].([]logrus.Fields)
	require.True(t, ok, "permissions should be []logrus.Fields")
	require.NotEmpty(t, permissions, "permissions should not be empty")

	// Check that we have entries for both resources
	require.Len(t, permissions, 2, "Should have permissions entries for both resources")

	// Check the first permission entry
	firstPerm := permissions[0]
	assert.Contains(t, firstPerm, "resource", "First permission entry should contain resource field")
	assert.Contains(t, firstPerm, "results", "First permission entry should contain results field")
	assert.Equal(t, "[Domain: collections, Collection: Test1]", firstPerm["resource"])
	assert.Equal(t, "success", firstPerm["results"])

	// Check the second permission entry
	secondPerm := permissions[1]
	assert.Contains(t, secondPerm, "resource", "Second permission entry should contain resource field")
	assert.Contains(t, secondPerm, "results", "Second permission entry should contain results field")
	assert.Equal(t, "[Domain: collections, Collection: Test2]", secondPerm["resource"])
	assert.Equal(t, "success", secondPerm["results"])

	// Check other required fields
	assert.Equal(t, "authorize", entry.Data["action"])
	assert.Equal(t, principal.Username, entry.Data["user"])
	assert.Equal(t, principal.Groups, entry.Data["groups"])
	assert.Equal(t, authorization.ComponentName, entry.Data["component"])
	assert.Equal(t, authorization.READ, entry.Data["request_action"])

	// Verify the final result matches the logged permissions
	assert.ElementsMatch(t, testResources, allowedResources,
		"Allowed resources should match input resources")
}
+
+func TestAuthorizeResourceAggregation(t *testing.T) {
+ // Setup proper logger with hook for testing
+ logger := logrus.New()
+ logger.SetLevel(logrus.InfoLevel)
+
+ // Create a hook to capture log entries
+ hook := &test.Hook{}
+ logger.AddHook(hook)
+
+ m, err := setupTestManager(t, logger)
+ require.NoError(t, err)
+
+ // Setup admin policy
+ _, err = m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.READ, authorization.DataDomain)
+ require.NoError(t, err)
+ ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("admin-user", authentication.AuthTypeDb),
+ conv.PrefixRoleName("admin"))
+ require.NoError(t, err)
+ require.True(t, ok)
+
+ principal := &models.Principal{
+ Username: "admin-user",
+ Groups: []string{"admin-group"},
+ UserType: models.UserTypeInputDb,
+ }
+
+ // Test with 1000 duplicate resources (simulating the original issue)
+ resources := make([]string, 1000)
+ for i := 0; i < 1000; i++ {
+ resources[i] = "data/collections/ContactRecommendations/shards/*/objects/*"
+ }
+
+ // Execute authorization
+ err = m.authorize(context.Background(), principal, authorization.READ, false, resources...)
+ require.NoError(t, err)
+
+ // Verify logging behavior
+ require.NotEmpty(t, hook.AllEntries())
+ lastEntry := hook.LastEntry()
+ require.NotNil(t, lastEntry)
+
+ // Verify log fields
+ assert.Equal(t, "authorize", lastEntry.Data["action"])
+ assert.Equal(t, "admin-user", lastEntry.Data["user"])
+ assert.Equal(t, authorization.ComponentName, lastEntry.Data["component"])
+ assert.Equal(t, authorization.READ, lastEntry.Data["request_action"])
+
+ // Verify permissions field exists
+ permissions, ok := lastEntry.Data["permissions"].([]logrus.Fields)
+ require.True(t, ok, "permissions field should be present")
+
+ // Verify aggregation - should only have 1 entry instead of 1000
+ assert.Len(t, permissions, 1, "should aggregate 1000 duplicate resources into 1 entry")
+
+ // Verify the single entry has the correct resource and count
+ require.Len(t, permissions, 1)
+ perm := permissions[0]
+
+ resource, ok := perm["resource"].(string)
+ require.True(t, ok, "resource should be a string")
+ assert.Equal(t, "[Domain: data, Collection: ContactRecommendations, Tenant: *, Object: *]", resource)
+
+ // Verify aggregation by checking that we have fewer log entries than resources
+ // This proves that 1000 identical resources were aggregated into 1 log entry
+ assert.Len(t, permissions, 1, "should aggregate 1000 duplicate resources into 1 log entry")
+
+ results, ok := perm["results"].(string)
+ require.True(t, ok, "results should be a string")
+ assert.Equal(t, "success", results)
+}
+
+func TestFilterAuthorizedResourcesAggregation(t *testing.T) {
+ // Setup proper logger with hook for testing
+ logger := logrus.New()
+ logger.SetLevel(logrus.InfoLevel)
+
+ // Create a hook to capture log entries
+ hook := &test.Hook{}
+ logger.AddHook(hook)
+
+ m, err := setupTestManager(t, logger)
+ require.NoError(t, err)
+
+ // Setup admin policy
+ _, err = m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.READ, authorization.DataDomain)
+ require.NoError(t, err)
+ ok, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("admin-user", authentication.AuthTypeDb),
+ conv.PrefixRoleName("admin"))
+ require.NoError(t, err)
+ require.True(t, ok)
+
+ principal := &models.Principal{
+ Username: "admin-user",
+ Groups: []string{"admin-group"},
+ UserType: models.UserTypeInputDb,
+ }
+
+ // Test with 1000 duplicate resources (simulating the original issue)
+ resources := make([]string, 1000)
+ for i := 0; i < 1000; i++ {
+ resources[i] = "data/collections/ContactRecommendations/shards/*/objects/*"
+ }
+
+ // Execute FilterAuthorizedResources
+ allowedResources, err := m.FilterAuthorizedResources(context.Background(), principal, authorization.READ, resources...)
+ require.NoError(t, err)
+
+ // Verify logging behavior
+ require.NotEmpty(t, hook.AllEntries())
+ lastEntry := hook.LastEntry()
+ require.NotNil(t, lastEntry)
+
+ // Verify log fields
+ assert.Equal(t, "authorize", lastEntry.Data["action"])
+ assert.Equal(t, "admin-user", lastEntry.Data["user"])
+ assert.Equal(t, authorization.ComponentName, lastEntry.Data["component"])
+ assert.Equal(t, authorization.READ, lastEntry.Data["request_action"])
+
+ // Verify permissions field exists
+ permissions, ok := lastEntry.Data["permissions"].([]logrus.Fields)
+ require.True(t, ok, "permissions field should be present")
+
+ // Verify aggregation - should only have 1 entry instead of 1000
+ assert.Len(t, permissions, 1, "should aggregate 1000 duplicate resources into 1 entry")
+
+ // Verify the single entry has the correct resource and count
+ require.Len(t, permissions, 1)
+ perm := permissions[0]
+
+ resource, ok := perm["resource"].(string)
+ require.True(t, ok, "resource should be a string")
+ assert.Equal(t, "[Domain: data, Collection: ContactRecommendations, Tenant: *, Object: *]", resource)
+
+ results, ok := perm["results"].(string)
+ require.True(t, ok, "results should be a string")
+ assert.Equal(t, "success", results)
+
+ // Verify that all 1000 resources are returned in allowedResources
+ assert.Len(t, allowedResources, 1, "should return 1 unique resource (duplicates are aggregated)")
+
+ // Verify the returned resource is correct
+ assert.Equal(t, "data/collections/ContactRecommendations/shards/*/objects/*", allowedResources[0], "returned resource should be the same as input")
+}
+
+func setupTestManager(t *testing.T, logger *logrus.Logger) (*Manager, error) {
+ tmpDir, err := os.MkdirTemp("", "rbac-test-*")
+ if err != nil {
+ return nil, err
+ }
+
+ t.Cleanup(func() {
+ os.RemoveAll(tmpDir)
+ })
+
+ rbacDir := filepath.Join(tmpDir, "rbac")
+ if err := os.MkdirAll(rbacDir, 0o755); err != nil {
+ return nil, err
+ }
+
+ policyPath := filepath.Join(rbacDir, "policy.csv")
+
+ conf := rbacconf.Config{
+ Enabled: true,
+ }
+
+ return New(policyPath, conf, config.Authentication{OIDC: config.OIDC{Enabled: true}, APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"test-user"}}}, logger)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager.go
new file mode 100644
index 0000000000000000000000000000000000000000..72dff3c962267e69702fe14ebc3f898dd7ade777
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager.go
@@ -0,0 +1,595 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/casbin/casbin/v2"
+ "github.com/sirupsen/logrus"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
const (
	// SnapshotVersionV0 is the legacy snapshot layout (pre-versioning, 1.29
	// era); snapshots at this version are upgraded during Restore.
	SnapshotVersionV0 = iota
	// SnapshotVersionLatest is the version written by Manager.Snapshot.
	SnapshotVersionLatest
)
+
// Manager implements RBAC on top of a casbin synced, cached enforcer.
type Manager struct {
	casbin *casbin.SyncedCachedEnforcer
	logger logrus.FieldLogger
	authNconf config.Authentication
	rbacConf rbacconf.Config
	// backupLock guards policy state during backup/snapshot operations:
	// mutating and reading methods take the read side so they can proceed
	// concurrently with each other but not with an exclusive writer.
	backupLock sync.RWMutex
}
+
// New initializes the casbin enforcer from the given storage path and
// configuration and returns a ready-to-use Manager.
func New(rbacStoragePath string, rbacConf rbacconf.Config, authNconf config.Authentication, logger logrus.FieldLogger) (*Manager, error) {
	csbin, err := Init(rbacConf, rbacStoragePath, authNconf)
	if err != nil {
		return nil, err
	}

	return &Manager{csbin, logger, authNconf, rbacConf, sync.RWMutex{}}, nil
}
+
// UpdateRolesPermissions upserts the given role-to-policies mapping.
// There is no difference between UpdateRolesPermissions and
// CreateRolesPermissions; both exist purely to satisfy an interface.
func (m *Manager) UpdateRolesPermissions(roles map[string][]authorization.Policy) error {
	m.backupLock.RLock()
	defer m.backupLock.RUnlock()

	return m.upsertRolesPermissions(roles)
}
+
// CreateRolesPermissions upserts the given role-to-policies mapping; it is
// identical to UpdateRolesPermissions (both satisfy an interface).
func (m *Manager) CreateRolesPermissions(roles map[string][]authorization.Policy) error {
	m.backupLock.RLock()
	defer m.backupLock.RUnlock()

	return m.upsertRolesPermissions(roles)
}
+
// GetUsersOrGroupsWithRoles returns the distinct users (or, when isGroup is
// true, OIDC groups) of the given auth type that have at least one role
// assigned. Groups are only supported for OIDC.
//
// NOTE(review): GetAllSubjects returns policy subjects (i.e. roles) here —
// the variable name "roles" reflects that despite the generic casbin API name.
func (m *Manager) GetUsersOrGroupsWithRoles(isGroup bool, authType authentication.AuthType) ([]string, error) {
	roles, err := m.casbin.GetAllSubjects()
	if err != nil {
		return nil, err
	}
	// Deduplicate via a set: one subject may hold several roles.
	usersOrGroups := map[string]struct{}{}
	for _, role := range roles {
		users, err := m.casbin.GetUsersForRole(role)
		if err != nil {
			return nil, err
		}
		for _, userOrGroup := range users {
			name, _ := conv.GetUserAndPrefix(userOrGroup)
			if isGroup {
				// groups are only supported for OIDC
				if authType == authentication.AuthTypeOIDC && strings.HasPrefix(userOrGroup, conv.OIDC_GROUP_NAME_PREFIX) {
					usersOrGroups[name] = struct{}{}
				}
			} else {
				// users are matched on the auth-type prefix of the stored
				// subject name (e.g. "oidc:..." / "db:...")
				if authType == authentication.AuthTypeOIDC && strings.HasPrefix(userOrGroup, string(authentication.AuthTypeOIDC)) {
					usersOrGroups[name] = struct{}{}
				}

				if authType == authentication.AuthTypeDb && strings.HasPrefix(userOrGroup, string(authentication.AuthTypeDb)) {
					usersOrGroups[name] = struct{}{}
				}

			}
		}
	}

	usersOrGroupsList := make([]string, 0, len(usersOrGroups))
	for user := range usersOrGroups {
		usersOrGroupsList = append(usersOrGroupsList, user)
	}

	return usersOrGroupsList, nil
}
+
// upsertRolesPermissions adds the given roles and their policies to casbin,
// persists the result and invalidates the enforcer cache so subsequent
// Enforce calls see the new state. Callers must hold backupLock.
func (m *Manager) upsertRolesPermissions(roles map[string][]authorization.Policy) error {
	for roleName, policies := range roles {
		// assign role to internal user to make sure to catch empty roles
		// e.g. : g, user:wv_internal_empty, role:roleName
		if _, err := m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId(conv.InternalPlaceHolder, authentication.AuthTypeDb), conv.PrefixRoleName(roleName)); err != nil {
			return fmt.Errorf("AddRoleForUser: %w", err)
		}
		for _, policy := range policies {
			if _, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName(roleName), policy.Resource, policy.Verb, policy.Domain); err != nil {
				return fmt.Errorf("AddNamedPolicy: %w", err)
			}
		}
	}
	if err := m.casbin.SavePolicy(); err != nil {
		return fmt.Errorf("SavePolicy: %w", err)
	}
	if err := m.casbin.InvalidateCache(); err != nil {
		return fmt.Errorf("InvalidateCache: %w", err)
	}
	return nil
}
+
// GetRoles returns the policies of the named roles, or of all roles when no
// names are given. Roles that only exist as assignments (no policies of
// their own, e.g. freshly created empty roles) are surfaced via the
// placeholder rows appended by collectStaleRoles.
func (m *Manager) GetRoles(names ...string) (map[string][]authorization.Policy, error) {
	m.backupLock.RLock()
	defer m.backupLock.RUnlock()

	var (
		casbinStoragePolicies [][][]string
		casbinStoragePoliciesMap = make(map[string]struct{})
	)

	if len(names) == 0 {
		// get all roles
		polices, err := m.casbin.GetNamedPolicy("p")
		if err != nil {
			return nil, fmt.Errorf("GetNamedPolicy: %w", err)
		}
		casbinStoragePolicies = append(casbinStoragePolicies, polices)

		for _, p := range polices {
			// e.g. policy line in casbin -> role:roleName resource verb domain, that's why p[0]
			casbinStoragePoliciesMap[p[0]] = struct{}{}
		}

		// grouping policies are scanned for roles that never appear as a
		// policy subject, i.e. empty/stale roles
		polices, err = m.casbin.GetNamedGroupingPolicy("g")
		if err != nil {
			return nil, fmt.Errorf("GetNamedGroupingPolicy: %w", err)
		}
		casbinStoragePolicies = collectStaleRoles(polices, casbinStoragePoliciesMap, casbinStoragePolicies)
	} else {
		for _, name := range names {
			polices, err := m.casbin.GetFilteredNamedPolicy("p", 0, conv.PrefixRoleName(name))
			if err != nil {
				return nil, fmt.Errorf("GetFilteredNamedPolicy: %w", err)
			}
			casbinStoragePolicies = append(casbinStoragePolicies, polices)

			for _, p := range polices {
				// e.g. policy line in casbin -> role:roleName resource verb domain, that's why p[0]
				casbinStoragePoliciesMap[p[0]] = struct{}{}
			}

			polices, err = m.casbin.GetFilteredNamedGroupingPolicy("g", 1, conv.PrefixRoleName(name))
			if err != nil {
				return nil, fmt.Errorf("GetFilteredNamedGroupingPolicy: %w", err)
			}
			casbinStoragePolicies = collectStaleRoles(polices, casbinStoragePoliciesMap, casbinStoragePolicies)
		}
	}
	// convert the raw casbin rows back into the authorization.Policy shape
	policies, err := conv.CasbinPolicies(casbinStoragePolicies...)
	if err != nil {
		return nil, fmt.Errorf("CasbinPolicies: %w", err)
	}
	return policies, nil
}
+
+func (m *Manager) RemovePermissions(roleName string, permissions []*authorization.Policy) error {
+ m.backupLock.RLock()
+ defer m.backupLock.RUnlock()
+
+ for _, permission := range permissions {
+ ok, err := m.casbin.RemoveNamedPolicy("p", conv.PrefixRoleName(roleName), permission.Resource, permission.Verb, permission.Domain)
+ if err != nil {
+ return fmt.Errorf("RemoveNamedPolicy: %w", err)
+ }
+ if !ok {
+ return nil // deletes are idempotent
+ }
+ }
+ if err := m.casbin.SavePolicy(); err != nil {
+ return fmt.Errorf("SavePolicy: %w", err)
+ }
+ if err := m.casbin.InvalidateCache(); err != nil {
+ return fmt.Errorf("InvalidateCache: %w", err)
+ }
+ return nil
+}
+
+func (m *Manager) HasPermission(roleName string, permission *authorization.Policy) (bool, error) {
+ m.backupLock.RLock()
+ defer m.backupLock.RUnlock()
+
+ policy, err := m.casbin.HasNamedPolicy("p", conv.PrefixRoleName(roleName), permission.Resource, permission.Verb, permission.Domain)
+ if err != nil {
+ return false, fmt.Errorf("HasNamedPolicy: %w", err)
+ }
+ return policy, nil
+}
+
+func (m *Manager) DeleteRoles(roles ...string) error {
+ m.backupLock.RLock()
+ defer m.backupLock.RUnlock()
+
+ for _, roleName := range roles {
+ // remove role
+ roleRemoved, err := m.casbin.RemoveFilteredNamedPolicy("p", 0, conv.PrefixRoleName(roleName))
+ if err != nil {
+ return fmt.Errorf("RemoveFilteredNamedPolicy: %w", err)
+ }
+ // remove role assignment
+ roleAssignmentsRemoved, err := m.casbin.RemoveFilteredGroupingPolicy(1, conv.PrefixRoleName(roleName))
+ if err != nil {
+ return fmt.Errorf("RemoveFilteredGroupingPolicy: %w", err)
+ }
+
+ if !roleRemoved && !roleAssignmentsRemoved {
+ return nil // deletes are idempotent
+ }
+ }
+ if err := m.casbin.SavePolicy(); err != nil {
+ return fmt.Errorf("SavePolicy: %w", err)
+ }
+ if err := m.casbin.InvalidateCache(); err != nil {
+ return fmt.Errorf("InvalidateCache: %w", err)
+ }
+ return nil
+}
+
// AddRolesForUser assigns the given roles to user, persists the change and
// invalidates the enforcer cache.
//
// NOTE: user has to be prefixed by user:, group:, key: etc.
// see func PrefixUserName(user) it will prefix username and no-op if already prefixed
func (m *Manager) AddRolesForUser(user string, roles []string) error {
	m.backupLock.RLock()
	defer m.backupLock.RUnlock()

	if !conv.NameHasPrefix(user) {
		return errors.New("user does not contain a prefix")
	}

	for _, role := range roles {
		if _, err := m.casbin.AddRoleForUser(user, conv.PrefixRoleName(role)); err != nil {
			return fmt.Errorf("AddRoleForUser: %w", err)
		}
	}
	if err := m.casbin.SavePolicy(); err != nil {
		return fmt.Errorf("SavePolicy: %w", err)
	}
	if err := m.casbin.InvalidateCache(); err != nil {
		return fmt.Errorf("InvalidateCache: %w", err)
	}
	return nil
}
+
+func (m *Manager) GetRolesForUserOrGroup(userName string, authType authentication.AuthType, isGroup bool) (map[string][]authorization.Policy, error) {
+ m.backupLock.RLock()
+ defer m.backupLock.RUnlock()
+
+ var rolesNames []string
+ var err error
+ if isGroup {
+ rolesNames, err = m.casbin.GetRolesForUser(conv.PrefixGroupName(userName))
+ if err != nil {
+ return nil, fmt.Errorf("GetRolesForUserOrGroup: %w", err)
+ }
+ } else {
+ rolesNames, err = m.casbin.GetRolesForUser(conv.UserNameWithTypeFromId(userName, authType))
+ if err != nil {
+ return nil, fmt.Errorf("GetRolesForUserOrGroup: %w", err)
+ }
+ }
+ if len(rolesNames) == 0 {
+ return map[string][]authorization.Policy{}, err
+ }
+ roles, err := m.GetRoles(rolesNames...)
+ if err != nil {
+ return nil, fmt.Errorf("GetRoles: %w", err)
+ }
+ return roles, err
+}
+
// GetUsersOrGroupForRole returns the users (or, when isGroup is true, OIDC
// groups) of the given auth type that are assigned roleName. The internal
// placeholder subject used to mark empty roles is filtered out.
func (m *Manager) GetUsersOrGroupForRole(roleName string, authType authentication.AuthType, isGroup bool) ([]string, error) {
	m.backupLock.RLock()
	defer m.backupLock.RUnlock()

	pusers, err := m.casbin.GetUsersForRole(conv.PrefixRoleName(roleName))
	if err != nil {
		return nil, fmt.Errorf("GetUsersOrGroupForRole: %w", err)
	}
	users := make([]string, 0, len(pusers))
	for idx := range pusers {
		userOrGroup, prefix := conv.GetUserAndPrefix(pusers[idx])
		if userOrGroup == conv.InternalPlaceHolder {
			continue
		}
		if isGroup {
			// NOTE(review): the HasPrefix arguments look reversed — this asks
			// whether the constant "group:" starts with the extracted prefix,
			// not the other way around. It matches prefix == "group" but also
			// an empty prefix; confirm the intended comparison.
			if authType == authentication.AuthTypeOIDC && strings.HasPrefix(conv.OIDC_GROUP_NAME_PREFIX, prefix) {
				users = append(users, userOrGroup)
			}
		} else {
			// users are stored with their auth type (e.g. "db"/"oidc") as prefix
			if prefix == string(authType) {
				users = append(users, userOrGroup)
			}
		}
	}
	return users, nil
}
+
+func (m *Manager) RevokeRolesForUser(userName string, roles ...string) error {
+ m.backupLock.RLock()
+ defer m.backupLock.RUnlock()
+
+ if !conv.NameHasPrefix(userName) {
+ return errors.New("user does not contain a prefix")
+ }
+
+ for _, roleName := range roles {
+ if _, err := m.casbin.DeleteRoleForUser(userName, conv.PrefixRoleName(roleName)); err != nil {
+ return fmt.Errorf("DeleteRoleForUser: %w", err)
+ }
+ }
+ if err := m.casbin.SavePolicy(); err != nil {
+ return fmt.Errorf("SavePolicy: %w", err)
+ }
+ if err := m.casbin.InvalidateCache(); err != nil {
+ return fmt.Errorf("InvalidateCache: %w", err)
+ }
+ return nil
+}
+
// snapshot is the RBAC state to be used for RAFT snapshots.
type snapshot struct {
	// Policy holds the "p" rows: role, resource, verb, domain.
	Policy [][]string `json:"roles_policies"`
	// GroupingPolicy holds the "g" rows: subject (user/group), role.
	GroupingPolicy [][]string `json:"grouping_policies"`
	// Version is one of the SnapshotVersion* constants; V0 snapshots are
	// upgraded on restore.
	Version int `json:"version"`
}
+
// Snapshot serializes the current casbin policies and grouping policies as
// JSON (at SnapshotVersionLatest) for inclusion in a RAFT snapshot.
//
// NOTE(review): a nil Manager returns an empty non-nil slice while a nil
// enforcer returns a nil slice — confirm whether callers rely on the
// distinction.
func (m *Manager) Snapshot() ([]byte, error) {
	// snapshot isn't always initialized, e.g. when RBAC is disabled
	if m == nil {
		return []byte{}, nil
	}
	if m.casbin == nil {
		return nil, nil
	}

	policy, err := m.casbin.GetPolicy()
	if err != nil {
		return nil, err
	}
	groupingPolicy, err := m.casbin.GetGroupingPolicy()
	if err != nil {
		return nil, err
	}

	// Use a buffer to stream the JSON encoding
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(snapshot{Policy: policy, GroupingPolicy: groupingPolicy, Version: SnapshotVersionLatest}); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
+
// Restore replaces the current casbin state with the decoded snapshot,
// upgrading V0 (1.29-era) snapshots, re-applying predefined roles from the
// environment configuration, and reloading policies into memory. The order
// of these steps matters: clear → add → upgrade → predefined → load.
func (m *Manager) Restore(b []byte) error {
	// don't overwrite with empty snapshot to avoid overwriting recovery from file
	// with a non-existent RBAC snapshot when coming from old versions
	if m == nil || len(b) == 0 {
		return nil
	}
	if m.casbin == nil {
		return nil
	}

	// the local variable deliberately shadows the snapshot type below
	snapshot := snapshot{}
	if err := json.Unmarshal(b, &snapshot); err != nil {
		return fmt.Errorf("restore snapshot: decode json: %w", err)
	}

	// we need to clear the policies before adding the new ones
	m.casbin.ClearPolicy()

	_, err := m.casbin.AddPolicies(snapshot.Policy)
	if err != nil {
		return fmt.Errorf("add policies: %w", err)
	}

	_, err = m.casbin.AddGroupingPolicies(snapshot.GroupingPolicy)
	if err != nil {
		return fmt.Errorf("add grouping policies: %w", err)
	}

	// legacy snapshots need their policies/groupings migrated to the current
	// verb and prefix scheme
	if snapshot.Version == SnapshotVersionV0 {
		if err := upgradePoliciesFrom129(m.casbin, true); err != nil {
			return fmt.Errorf("upgrade policies: %w", err)
		}

		if err := upgradeGroupingsFrom129(m.casbin, m.authNconf); err != nil {
			return fmt.Errorf("upgrade groupings: %w", err)
		}
	}

	// environment config needs to be applied again in case there were changes since the last snapshot
	if err := applyPredefinedRoles(m.casbin, m.rbacConf, m.authNconf); err != nil {
		return fmt.Errorf("apply env config: %w", err)
	}

	// Load the policies to ensure they are in memory
	if err := m.casbin.LoadPolicy(); err != nil {
		return fmt.Errorf("load policies: %w", err)
	}

	return nil
}
+
+// BatchEnforcers is not needed after some digging they just loop over requests,
+// w.r.t.
+// source code https://github.com/casbin/casbin/blob/master/enforcer.go#L872
+// issue https://github.com/casbin/casbin/issues/710
+func (m *Manager) checkPermissions(principal *models.Principal, resource, verb string) (bool, error) {
+ // first check group permissions
+ for _, group := range principal.Groups {
+ allowed, err := m.casbin.Enforce(conv.PrefixGroupName(group), resource, verb)
+ if err != nil {
+ return false, err
+ }
+ if allowed {
+ return true, nil
+ }
+ }
+
+ // If no group permissions, check user permissions
+ return m.casbin.Enforce(conv.UserNameWithTypeFromPrincipal(principal), resource, verb)
+}
+
+func prettyPermissionsActions(perm *models.Permission) string {
+ if perm == nil || perm.Action == nil {
+ return ""
+ }
+ return *perm.Action
+}
+
+func prettyPermissionsResources(perm *models.Permission) string {
+ res := ""
+ if perm == nil {
+ return ""
+ }
+
+ if perm.Backups != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.BackupsDomain)
+ if perm.Backups.Collection != nil && *perm.Backups.Collection != "" {
+ s += fmt.Sprintf("Collection: %s", *perm.Backups.Collection)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Data != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.DataDomain)
+ if perm.Data.Collection != nil && *perm.Data.Collection != "" {
+ s += fmt.Sprintf(" Collection: %s,", *perm.Data.Collection)
+ }
+ if perm.Data.Tenant != nil && *perm.Data.Tenant != "" {
+ s += fmt.Sprintf(" Tenant: %s,", *perm.Data.Tenant)
+ }
+ if perm.Data.Object != nil && *perm.Data.Object != "" {
+ s += fmt.Sprintf(" Object: %s", *perm.Data.Object)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Nodes != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.NodesDomain)
+
+ if perm.Nodes.Verbosity != nil && *perm.Nodes.Verbosity != "" {
+ s += fmt.Sprintf(" Verbosity: %s,", *perm.Nodes.Verbosity)
+ }
+ if perm.Nodes.Collection != nil && *perm.Nodes.Collection != "" {
+ s += fmt.Sprintf(" Collection: %s", *perm.Nodes.Collection)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Roles != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.RolesDomain)
+ if perm.Roles.Role != nil && *perm.Roles.Role != "" {
+ s += fmt.Sprintf(" Role: %s,", *perm.Roles.Role)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Collections != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.CollectionsDomain)
+
+ if perm.Collections.Collection != nil && *perm.Collections.Collection != "" {
+ s += fmt.Sprintf(" Collection: %s,", *perm.Collections.Collection)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Tenants != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.TenantsDomain)
+
+ if perm.Tenants.Tenant != nil && *perm.Tenants.Tenant != "" {
+ s += fmt.Sprintf(" Collection: %s,", *perm.Tenants.Collection)
+ s += fmt.Sprintf(" Tenant: %s", *perm.Tenants.Tenant)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Users != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.UsersDomain)
+
+ if perm.Users.Users != nil {
+ s += fmt.Sprintf(" User: %s,", *perm.Users.Users)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Replicate != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.ReplicateDomain)
+
+ if perm.Replicate.Collection != nil && *perm.Replicate.Collection != "" {
+ s += fmt.Sprintf(" Collection: %s,", *perm.Replicate.Collection)
+ }
+ if perm.Replicate.Shard != nil && *perm.Replicate.Shard != "" {
+ s += fmt.Sprintf(" Shard: %s,", *perm.Replicate.Shard)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ if perm.Aliases != nil {
+ s := fmt.Sprintf("Domain: %s,", authorization.AliasesDomain)
+
+ if perm.Aliases.Collection != nil && *perm.Aliases.Collection != "" {
+ s += fmt.Sprintf(" Collection: %s,", *perm.Aliases.Collection)
+ }
+ if perm.Aliases.Alias != nil && *perm.Aliases.Alias != "" {
+ s += fmt.Sprintf(" Alias: %s,", *perm.Aliases.Alias)
+ }
+ s = strings.TrimSuffix(s, ",")
+ res += fmt.Sprintf("[%s]", s)
+ }
+
+ return res
+}
+
// prettyStatus maps an authorization outcome to its audit-log label.
func prettyStatus(value bool) string {
	if !value {
		return "failed"
	}
	return "success"
}
+
// collectStaleRoles appends a placeholder policy group for every role that
// appears in the grouping policies (assignments) but has no policy of its
// own, so that empty roles still show up in GetRoles results. Built-in roles
// are skipped. The (possibly grown) policy list is returned.
func collectStaleRoles(polices [][]string, casbinStoragePoliciesMap map[string]struct{}, casbinStoragePolicies [][][]string) [][][]string {
	for _, p := range polices {
		// ignore builtin roles
		if slices.Contains(authorization.BuiltInRoles, conv.TrimRoleNamePrefix(p[1])) {
			continue
		}
		// collect stale or empty roles
		if _, ok := casbinStoragePoliciesMap[p[1]]; !ok {
			// e.g. policy line in casbin -> g, user:wv_internal_empty, role:roleName, that's why p[1]
			casbinStoragePolicies = append(casbinStoragePolicies, [][]string{{
				p[1], conv.InternalPlaceHolder, conv.InternalPlaceHolder, "*",
			}})
		}
	}
	return casbinStoragePolicies
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5fd0ba31d272b8fef05bf993fffea48faa7695c
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/manager_test.go
@@ -0,0 +1,306 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+)
+
// TestSnapshotAndRestore round-trips policies through Snapshot/Restore into
// a fresh Manager and verifies that exactly the policies added by each case
// survive. Pre-existing built-in policies are excluded by comparing deltas
// against the initial state rather than full policy sets.
func TestSnapshotAndRestore(t *testing.T) {
	tests := []struct {
		name string
		setupPolicies func(*Manager) error
		wantErr bool
	}{
		{
			name: "empty policies",
			setupPolicies: func(m *Manager) error {
				return nil
			},
		},
		{
			name: "with role and policy",
			setupPolicies: func(m *Manager) error {
				// Add a role and policy
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				_, err = m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("test-user", authentication.AuthTypeDb), conv.PrefixRoleName("admin"))
				return err
			},
		},
		{
			name: "multiple roles and policies",
			setupPolicies: func(m *Manager) error {
				// Add multiple roles and policies
				_, err := m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.READ, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				_, err = m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("editor"), "collections/*", authorization.UPDATE, authorization.SchemaDomain)
				if err != nil {
					return err
				}
				_, err = m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("test-user", authentication.AuthTypeDb), conv.PrefixRoleName("admin"))
				if err != nil {
					return err
				}
				_, err = m.casbin.AddRoleForUser(conv.UserNameWithTypeFromId("test-user", authentication.AuthTypeDb), conv.PrefixRoleName("editor"))
				return err
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup logger with hook for testing
			logger, _ := test.NewNullLogger()
			m, err := setupTestManager(t, logger)
			require.NoError(t, err)

			// Get initial policies before our test setup (built-ins etc.)
			initialPolicies, err := m.casbin.GetPolicy()
			require.NoError(t, err)
			initialGroupingPolicies, err := m.casbin.GetGroupingPolicy()
			require.NoError(t, err)

			// Setup policies if needed
			if tt.setupPolicies != nil {
				err := tt.setupPolicies(m)
				require.NoError(t, err)
			}

			// Take snapshot
			snapshotData, err := m.Snapshot()
			require.NoError(t, err)
			require.NotNil(t, snapshotData)

			// Create a new manager for restore
			m2, err := setupTestManager(t, logger)
			require.NoError(t, err)

			// Restore from snapshot
			err = m2.Restore(snapshotData)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)

			// Get final policies after our test setup
			finalPolicies, err := m.casbin.GetPolicy()
			require.NoError(t, err)
			finalGroupingPolicies, err := m.casbin.GetGroupingPolicy()
			require.NoError(t, err)

			// Get restored policies
			restoredPolicies, err := m2.casbin.GetPolicy()
			require.NoError(t, err)
			restoredGroupingPolicies, err := m2.casbin.GetGroupingPolicy()
			require.NoError(t, err)

			// Compare only the delta of policies we added
			addedPolicies := getPolicyDelta(initialPolicies, finalPolicies)
			restoredAddedPolicies := getPolicyDelta(initialPolicies, restoredPolicies)
			assert.ElementsMatch(t, addedPolicies, restoredAddedPolicies)

			// Compare only the delta of grouping policies we added
			addedGroupingPolicies := getPolicyDelta(initialGroupingPolicies, finalGroupingPolicies)
			restoredAddedGroupingPolicies := getPolicyDelta(initialGroupingPolicies, restoredGroupingPolicies)
			assert.ElementsMatch(t, addedGroupingPolicies, restoredAddedGroupingPolicies)
		})
	}
}
+
// getPolicyDelta returns the policies that are in b but not in a.
//
// Membership of a is precomputed into a set keyed by a separator-joined
// encoding of each policy, making the overall cost O(len(a)+len(b)) instead
// of the previous quadratic pairwise scan. The NUL separator keeps the
// encoding unambiguous (casbin policy fields are plain text; a NUL byte in a
// field is not expected — confirm if fields can ever contain binary data).
func getPolicyDelta(a, b [][]string) [][]string {
	// key encodes a policy so that distinct field lists map to distinct keys
	key := func(p []string) string {
		k := ""
		for _, field := range p {
			k += field + "\x00"
		}
		return k
	}

	seen := make(map[string]struct{}, len(a))
	for _, p := range a {
		seen[key(p)] = struct{}{}
	}

	delta := make([][]string, 0)
	for _, p := range b {
		if _, ok := seen[key(p)]; !ok {
			delta = append(delta, p)
		}
	}
	return delta
}
+
// equalPolicies reports whether two policies consist of exactly the same
// fields in the same order.
func equalPolicies(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, field := range a {
		if field != b[i] {
			return false
		}
	}
	return true
}
+
// TestSnapshotNilCasbin verifies that Snapshot on a Manager without an
// initialized enforcer returns nil data and no error.
func TestSnapshotNilCasbin(t *testing.T) {
	logger, _ := test.NewNullLogger()
	m := &Manager{
		casbin: nil,
		logger: logger,
	}

	snapshotData, err := m.Snapshot()
	require.NoError(t, err)
	assert.Nil(t, snapshotData)
}
+
// TestRestoreNilCasbin verifies that Restore on a Manager without an
// initialized enforcer is a silent no-op.
func TestRestoreNilCasbin(t *testing.T) {
	logger, _ := test.NewNullLogger()
	m := &Manager{
		casbin: nil,
		logger: logger,
	}

	err := m.Restore([]byte("{}"))
	require.NoError(t, err)
}
+
// TestRestoreInvalidData verifies that malformed snapshot JSON is rejected
// with a decode error, while an empty-but-valid JSON object restores cleanly.
func TestRestoreInvalidData(t *testing.T) {
	logger, _ := test.NewNullLogger()
	m, err := setupTestManager(t, logger)
	require.NoError(t, err)

	// Test with invalid JSON
	err = m.Restore([]byte("invalid json"))
	require.Error(t, err)
	assert.Contains(t, err.Error(), "decode json")

	// Test with empty data
	err = m.Restore([]byte("{}"))
	require.NoError(t, err)
}
+
// TestRestoreEmptyData verifies that restoring a zero-length snapshot is a
// no-op and does not wipe existing policies (guard against old versions that
// produced no RBAC snapshot).
func TestRestoreEmptyData(t *testing.T) {
	logger, _ := test.NewNullLogger()
	m, err := setupTestManager(t, logger)
	require.NoError(t, err)

	_, err = m.casbin.AddNamedPolicy("p", conv.PrefixRoleName("admin"), "*", authorization.READ, authorization.SchemaDomain)
	require.NoError(t, err)

	// 5 = the policy added above plus 4 predefined policies — presumably the
	// built-in roles installed at manager creation; confirm against
	// applyPredefinedRoles if this count changes.
	policies, err := m.casbin.GetPolicy()
	require.NoError(t, err)
	require.Len(t, policies, 5)

	err = m.Restore([]byte{})
	require.NoError(t, err)

	// nothing overwritten
	policies, err = m.casbin.GetPolicy()
	require.NoError(t, err)
	require.Len(t, policies, 5)
}
+
// TestSnapshotAndRestoreUpgrade verifies that restoring a Version-0 (1.29)
// snapshot upgrades legacy policies and groupings (verb scheme, user
// prefixes) and re-applies the built-in roles on top.
func TestSnapshotAndRestoreUpgrade(t *testing.T) {
	tests := []struct {
		name string
		policiesInput [][]string
		policiesExpected [][]string
		groupingsInput [][]string
		groupingsExpected [][]string
	}{
		{
			name: "assign users",
			policiesInput: [][]string{
				{"role:some_role", "users/.*", "U", "users"},
			},
			policiesExpected: [][]string{
				// legacy "U" on the users domain is upgraded to "A" (assign)
				{"role:some_role", "users/.*", "A", "users"},
				// build-in roles are added after restore
				{"role:viewer", "*", authorization.READ, "*"},
				{"role:read-only", "*", authorization.READ, "*"},
				{"role:admin", "*", conv.VALID_VERBS, "*"},
				{"role:root", "*", conv.VALID_VERBS, "*"},
			},
		},
		{
			name: "build-in",
			policiesInput: [][]string{
				{"role:viewer", "*", "R", "*"},
				{"role:admin", "*", "(C)|(R)|(U)|(D)", "*"},
			},
			policiesExpected: [][]string{
				{"role:viewer", "*", "R", "*"},
				{"role:read-only", "*", "R", "*"},
				// legacy CRUD verb set is widened to include (A)
				{"role:admin", "*", conv.VALID_VERBS, "*"},
				// build-in roles are added after restore
				{"role:root", "*", conv.VALID_VERBS, "*"},
			},
		},
		{
			name: "users",
			policiesInput: [][]string{
				{"role:admin", "*", "(C)|(R)|(U)|(D)", "*"}, // present to iterate over all roles in downgrade
			},
			policiesExpected: [][]string{
				{"role:admin", "*", "(C)|(R)|(U)|(D)|(A)", "*"},
				// build-in roles are added after restore
				{"role:viewer", "*", authorization.READ, "*"},
				{"role:read-only", "*", authorization.READ, "*"},
				{"role:root", "*", conv.VALID_VERBS, "*"},
			},
			groupingsInput: [][]string{
				{"user:test-user", "role:admin"},
			},
			// the legacy "user:" prefix fans out into one assignment per
			// configured auth type
			groupingsExpected: [][]string{
				{"db:test-user", "role:admin"},
				{"oidc:test-user", "role:admin"},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logger, _ := test.NewNullLogger()
			m, err := setupTestManager(t, logger)
			require.NoError(t, err)

			sh := snapshot{Version: 0, GroupingPolicy: tt.groupingsInput, Policy: tt.policiesInput}

			bytes, err := json.Marshal(sh)
			require.NoError(t, err)

			err = m.Restore(bytes)
			require.NoError(t, err)

			finalPolicies, err := m.casbin.GetPolicy()
			require.NoError(t, err)
			assert.ElementsMatch(t, finalPolicies, tt.policiesExpected)

			finalGroupingPolicies, err := m.casbin.GetGroupingPolicy()
			require.NoError(t, err)
			// NOTE(review): assert.Equal is order-sensitive, unlike the
			// ElementsMatch used for policies above — confirm grouping order
			// is deterministic or switch to ElementsMatch.
			assert.Equal(t, finalGroupingPolicies, tt.groupingsExpected)
		})
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model.go
new file mode 100644
index 0000000000000000000000000000000000000000..af0d437c32608a67bcfd8b2823dbabfa4e7aa99f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model.go
@@ -0,0 +1,320 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+
+ "github.com/casbin/casbin/v2"
+ "github.com/casbin/casbin/v2/model"
+ fileadapter "github.com/casbin/casbin/v2/persist/file-adapter"
+ casbinutil "github.com/casbin/casbin/v2/util"
+ "github.com/pkg/errors"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
+ "github.com/weaviate/weaviate/usecases/build"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
+const DEFAULT_POLICY_VERSION = "1.29.0"
+
+const (
+ // MODEL is the used model for casbin to store roles, permissions, users and comparisons patterns
+ // docs: https://casbin.org/docs/syntax-for-models
+ MODEL = `
+ [request_definition]
+ r = sub, obj, act
+
+ [policy_definition]
+ p = sub, obj, act, dom
+
+ [role_definition]
+ g = _, _
+
+ [policy_effect]
+ e = some(where (p.eft == allow))
+
+ [matchers]
+ m = g(r.sub, p.sub) && weaviateMatcher(r.obj, p.obj) && regexMatch(r.act, p.act)
+`
+)
+
+func createStorage(filePath string) error {
+ if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
+ return fmt.Errorf("failed to create directories: %w", err)
+ }
+
+ _, err := os.Stat(filePath)
+ if err == nil { // file exists
+ return nil
+ }
+
+ if os.IsNotExist(err) {
+ file, err := os.Create(filePath)
+ if err != nil {
+ return fmt.Errorf("failed to create file: %w", err)
+ }
+ defer file.Close()
+ return nil
+ }
+
+ return err
+}
+
+func Init(conf rbacconf.Config, policyPath string, authNconf config.Authentication) (*casbin.SyncedCachedEnforcer, error) {
+ if !conf.Enabled {
+ return nil, nil
+ }
+
+ m, err := model.NewModelFromString(MODEL)
+ if err != nil {
+ return nil, fmt.Errorf("load rbac model: %w", err)
+ }
+
+ enforcer, err := casbin.NewSyncedCachedEnforcer(m)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create enforcer: %w", err)
+ }
+ enforcer.EnableCache(true)
+
+ rbacStoragePath := fmt.Sprintf("%s/rbac", policyPath)
+ rbacStorageFilePath := fmt.Sprintf("%s/rbac/policy.csv", policyPath)
+
+ if err := createStorage(rbacStorageFilePath); err != nil {
+ return nil, errors.Wrapf(err, "create storage path: %v", rbacStorageFilePath)
+ }
+
+ enforcer.SetAdapter(fileadapter.NewAdapter(rbacStorageFilePath))
+
+ if err := enforcer.LoadPolicy(); err != nil {
+ return nil, err
+ }
+ // parse version string to check if upgrade is needed
+ policyVersion, err := getVersion(rbacStoragePath)
+ if err != nil {
+ return nil, err
+ }
+ versionParts := strings.Split(policyVersion, ".")
+ minorVersion, err := strconv.Atoi(versionParts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ if versionParts[0] == "1" && minorVersion < 30 {
+ if err := upgradePoliciesFrom129(enforcer, false); err != nil {
+ return nil, err
+ }
+
+ if err := upgradeGroupingsFrom129(enforcer, authNconf); err != nil {
+ return nil, err
+ }
+ }
+ // docs: https://casbin.org/docs/function/
+ enforcer.AddFunction("weaviateMatcher", WeaviateMatcherFunc)
+
+ if err := applyPredefinedRoles(enforcer, conf, authNconf); err != nil {
+ return nil, errors.Wrapf(err, "apply env config")
+ }
+
+ // update version after casbin policy has been written
+ if err := writeVersion(rbacStoragePath, build.Version); err != nil {
+ return nil, err
+ }
+
+ return enforcer, nil
+}
+
+// applyPredefinedRoles adds pre-defined roles (admin/viewer/root) and assigns them to the users provided in the
+// local config
+func applyPredefinedRoles(enforcer *casbin.SyncedCachedEnforcer, conf rbacconf.Config, authNconf config.Authentication) error {
+ // remove preexisting root role including assignments
+ _, err := enforcer.RemoveFilteredNamedPolicy("p", 0, conv.PrefixRoleName(authorization.Root))
+ if err != nil {
+ return err
+ }
+ _, err = enforcer.RemoveFilteredGroupingPolicy(1, conv.PrefixRoleName(authorization.Root))
+ if err != nil {
+ return err
+ }
+
+ _, err = enforcer.RemoveFilteredNamedPolicy("p", 0, conv.PrefixRoleName(authorization.ReadOnly))
+ if err != nil {
+ return err
+ }
+ _, err = enforcer.RemoveFilteredGroupingPolicy(1, conv.PrefixRoleName(authorization.ReadOnly))
+ if err != nil {
+ return err
+ }
+
+ // add pre existing roles
+ for name, verb := range conv.BuiltInPolicies {
+ if verb == "" {
+ continue
+ }
+ if _, err := enforcer.AddNamedPolicy("p", conv.PrefixRoleName(name), "*", verb, "*"); err != nil {
+ return fmt.Errorf("add policy: %w", err)
+ }
+ }
+
+ for i := range conf.RootUsers {
+ if strings.TrimSpace(conf.RootUsers[i]) == "" {
+ continue
+ }
+
+ if authNconf.APIKey.Enabled && slices.Contains(authNconf.APIKey.Users, conf.RootUsers[i]) {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.RootUsers[i], authentication.AuthTypeDb), conv.PrefixRoleName(authorization.Root)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+
+ if authNconf.OIDC.Enabled {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.RootUsers[i], authentication.AuthTypeOIDC), conv.PrefixRoleName(authorization.Root)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+ }
+
+ // temporary to enable import of existing keys to WCD (Admin + readonly)
+ for i := range conf.AdminUsers {
+ if strings.TrimSpace(conf.AdminUsers[i]) == "" {
+ continue
+ }
+
+ if authNconf.APIKey.Enabled && slices.Contains(authNconf.APIKey.Users, conf.AdminUsers[i]) {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.AdminUsers[i], authentication.AuthTypeDb), conv.PrefixRoleName(authorization.Admin)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+
+ if authNconf.OIDC.Enabled {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.AdminUsers[i], authentication.AuthTypeOIDC), conv.PrefixRoleName(authorization.Admin)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+ }
+
+ for i := range conf.ViewerUsers {
+ if strings.TrimSpace(conf.ViewerUsers[i]) == "" {
+ continue
+ }
+
+ if authNconf.APIKey.Enabled && slices.Contains(authNconf.APIKey.Users, conf.ViewerUsers[i]) {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.ViewerUsers[i], authentication.AuthTypeDb), conv.PrefixRoleName(authorization.Viewer)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+
+ if authNconf.OIDC.Enabled {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conf.ViewerUsers[i], authentication.AuthTypeOIDC), conv.PrefixRoleName(authorization.Viewer)); err != nil {
+ return fmt.Errorf("add role for user: %w", err)
+ }
+ }
+ }
+
+ for _, group := range conf.RootGroups {
+ if strings.TrimSpace(group) == "" {
+ continue
+ }
+ if _, err := enforcer.AddRoleForUser(conv.PrefixGroupName(group), conv.PrefixRoleName(authorization.Root)); err != nil {
+ return fmt.Errorf("add role for group %s: %w", group, err)
+ }
+ }
+
+ for _, viewerGroup := range conf.ReadOnlyGroups {
+ if strings.TrimSpace(viewerGroup) == "" {
+ continue
+ }
+ if _, err := enforcer.AddRoleForUser(conv.PrefixGroupName(viewerGroup), conv.PrefixRoleName(authorization.ReadOnly)); err != nil {
+ return fmt.Errorf("add viewer role for group %s: %w", viewerGroup, err)
+ }
+ }
+
+ if err := enforcer.SavePolicy(); err != nil {
+ return errors.Wrapf(err, "save policy")
+ }
+
+ return nil
+}
+
+func WeaviateMatcher(key1 string, key2 string) bool {
+ // If we're dealing with a tenant-specific path (matches /shards/#$)
+ if strings.HasSuffix(key1, "/shards/#") {
+ // Don't allow matching with wildcard patterns
+ if strings.HasSuffix(key2, "/shards/.*") {
+ return false
+ }
+ }
+ // For all other cases, use standard KeyMatch5
+ return casbinutil.KeyMatch5(key1, key2)
+}
+
+func WeaviateMatcherFunc(args ...interface{}) (interface{}, error) {
+ name1 := args[0].(string)
+ name2 := args[1].(string)
+
+ return (bool)(WeaviateMatcher(name1, name2)), nil
+}
+
+func getVersion(path string) (string, error) {
+ filePath := path + "/version"
+ _, err := os.Stat(filePath)
+ if err != nil { // file exists
+ return DEFAULT_POLICY_VERSION, nil
+ }
+ b, err := os.ReadFile(filePath)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func writeVersion(path, version string) error {
+ tmpFile, err := os.CreateTemp(path, "policy-temp-*.tmp")
+ if err != nil {
+ return err
+ }
+ tempFilename := tmpFile.Name()
+
+ defer func() {
+ tmpFile.Close()
+ os.Remove(tempFilename) // Remove temp file if it still exists
+ }()
+
+ writer := bufio.NewWriter(tmpFile)
+ if _, err := fmt.Fprint(writer, version); err != nil {
+ return err
+ }
+
+ // Flush the writer to ensure all data is written, then sync and flush tmpfile and atomically rename afterwards
+ if err := writer.Flush(); err != nil {
+ return err
+ }
+ if err := tmpFile.Sync(); err != nil {
+ return err
+ }
+ if err := tmpFile.Close(); err != nil {
+ return err
+ }
+
+ return os.Rename(tempFilename, path+"/version")
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f236387fe773b1f3653f29baa0c376a9395e03d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/model_test.go
@@ -0,0 +1,170 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "testing"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+)
+
+func testKeyMatch5(t *testing.T, key1, key2 string, expected bool) {
+ t.Helper()
+ if result := WeaviateMatcher(key1, key2); result != expected {
+ t.Errorf("WeaviateMatcher(%q, %q) = %v; want %v", key1, key2, result, expected)
+ }
+}
+
// TestKeyMatch5AuthZ table-tests WeaviateMatcher over the resource path
// helpers (roles, collections/shards metadata, data objects) against casbin
// policy patterns: global wildcards, exact matches, per-class and per-tenant
// scoping, regex/prefix patterns, and expected mismatches.
// NOTE(review): "#" appears to be the marker for collection-level (no-tenant)
// metadata paths — confirm against the conv package.
func TestKeyMatch5AuthZ(t *testing.T) {
	tests := []struct {
		name     string
		key1     string
		key2     string
		expected bool
	}{
		// Allow all
		{"Allow all roles", authorization.Roles()[0], "*", true},
		{"Allow all collections", authorization.CollectionsMetadata()[0], "*", true},
		{"Allow all collections with ABC", authorization.CollectionsMetadata("ABC")[0], "*", true},
		{"Allow all shards", authorization.ShardsMetadata("")[0], "*", true},
		{"Allow all shards with ABC", authorization.ShardsMetadata("ABC", "ABC")[0], "*", true},
		{"Allow all objects", authorization.Objects("", "", ""), "*", true},
		{"Allow all objects with Tenant1", authorization.Objects("", "Tenant1", ""), "*", true},
		{"Allow all tenants", authorization.ShardsMetadata("")[0], "*", true},
		{"Allow all tenants with ABC", authorization.ShardsMetadata("ABC", "ABC")[0], "*", true},

		// Class level
		{"Class level collections ABC", authorization.CollectionsMetadata("ABC")[0], conv.CasbinSchema("*", "#"), true},
		{"Class level shards ABC", authorization.ShardsMetadata("ABC")[0], conv.CasbinSchema("*", "*"), true},
		{"Class level collections ABC exact", authorization.CollectionsMetadata("ABC")[0], conv.CasbinSchema("ABC", "#"), true},
		{"Class level collections Class1 exact", authorization.CollectionsMetadata("Class1")[0], conv.CasbinSchema("Class1", "#"), true},
		{"Class level collections Class2 mismatch", authorization.CollectionsMetadata("Class2")[0], conv.CasbinSchema("Class1", "#"), false},
		{"Class level shards ABC TenantX", authorization.ShardsMetadata("ABC", "TenantX")[0], conv.CasbinSchema("ABC", ""), true},
		{"Class level objects ABC TenantX objectY", authorization.Objects("ABC", "TenantX", "objectY"), conv.CasbinData("ABC", "*", "*"), true},
		{"Class level tenant ABC TenantX", authorization.ShardsMetadata("ABC", "TenantX")[0], conv.CasbinSchema("ABC", ""), true},

		// Tenants level
		{"Tenants level shards", authorization.ShardsMetadata("")[0], conv.CasbinSchema("*", "*"), true},
		{"Tenants level shards ABC Tenant1", authorization.ShardsMetadata("ABC", "Tenant1")[0], conv.CasbinSchema("*", "*"), true},
		{"Tenants level shards Class1 Tenant1", authorization.ShardsMetadata("Class1", "Tenant1")[0], conv.CasbinSchema("*", "Tenant1"), true},
		{"Tenants level objects Class1 Tenant1 ObjectY", authorization.Objects("Class1", "Tenant1", "ObjectY"), conv.CasbinData("*", "Tenant1", ""), true},
		{"Tenants level shards Class1 Tenant2 mismatch", authorization.ShardsMetadata("Class1", "Tenant2")[0], conv.CasbinSchema("*", "Tenant1"), false},
		{"Tenants level shards Class1 Tenant2 mismatch 2", authorization.ShardsMetadata("Class1", "Tenant2")[0], conv.CasbinSchema("Class2", "Tenant1"), false},
		{"Tenants level shards mismatch", authorization.ShardsMetadata("")[0], conv.CasbinSchema("Class1", ""), false},
		{"Tenants level collections Class1", authorization.CollectionsMetadata("Class1")[0], conv.CasbinSchema("Class1", "#"), true},
		{"Tenants level shards Class1 tenant1", authorization.ShardsMetadata("Class1", "tenant1")[0], conv.CasbinSchema("Class1", ""), true},

		// Objects level
		{"Objects level all", authorization.Objects("", "", ""), conv.CasbinData(".*", ".*", ".*"), true},
		{"Objects level ABC Tenant1", authorization.Objects("ABC", "Tenant1", ""), conv.CasbinData("*", "*", "*"), true},
		{"Objects level ABC Tenant1 exact", authorization.Objects("ABC", "Tenant1", ""), conv.CasbinData("*", "Tenant1", "*"), true},
		{"Objects level ABC Tenant1 abc", authorization.Objects("ABC", "Tenant1", "abc"), conv.CasbinData("*", "Tenant1", "*"), true},
		{"Objects level ABC Tenant1 abc exact", authorization.Objects("ABC", "Tenant1", "abc"), conv.CasbinData("*", "Tenant1", "*"), true},
		{"Objects level ABC Tenant1 abc exact 2", authorization.Objects("ABC", "Tenant1", "abc"), conv.CasbinData("*", "*", "abc"), true},
		{"Objects level ABC Tenant1 abc exact 3", authorization.Objects("ABC", "Tenant1", "abc"), conv.CasbinData("ABC", "Tenant1", "abc"), true},
		{"Objects level ABCD Tenant1 abc mismatch", authorization.Objects("ABCD", "Tenant1", "abc"), conv.CasbinData("ABC", "Tenant1", "abc"), false},
		{"Objects level ABC Tenant1 abcd mismatch", authorization.Objects("ABC", "Tenant1", "abcd"), conv.CasbinData("ABC", "Tenant1", "abc"), false},
		{"Objects level ABC bar abcd", authorization.Objects("ABC", "bar", "abcd"), conv.CasbinData("*", "bar", ""), true},

		// Tenants
		{"Tenants level tenant", authorization.ShardsMetadata("")[0], conv.CasbinSchema("*", "*"), true},
		{"Tenants level tenant ABC Tenant1", authorization.ShardsMetadata("ABC", "Tenant1")[0], conv.CasbinSchema("*", "*"), true},
		{"Tenants level tenant Class1 Tenant1", authorization.ShardsMetadata("Class1", "Tenant1")[0], conv.CasbinSchema("*", "Tenant1"), true},
		{"Tenants level objects Class1 Tenant1 ObjectY", authorization.Objects("Class1", "Tenant1", "ObjectY"), conv.CasbinData("*", "Tenant1", ""), true},
		{"Tenants level tenant Class1 Tenant2 mismatch", authorization.ShardsMetadata("Class1", "Tenant2")[0], conv.CasbinSchema("*", "Tenant1"), false},
		{"Tenants level tenant Class1 Tenant2 mismatch 2", authorization.ShardsMetadata("Class1", "Tenant2")[0], conv.CasbinSchema("Class2", "Tenant1"), false},
		{"Tenants level tenant mismatch", authorization.ShardsMetadata("")[0], conv.CasbinSchema("Class1", ""), false},
		{"Tenants level collections Class1", authorization.ShardsMetadata("Class1")[0], conv.CasbinSchema("Class1", ""), true},
		{"Tenants level tenant Class1 tenant1", authorization.ShardsMetadata("Class1", "tenant1")[0], conv.CasbinSchema("Class1", ""), true},

		// Regex
		{"Regex collections ABCD", authorization.CollectionsMetadata("ABCD")[0], conv.CasbinSchema("ABC", "#"), false},
		{"Regex shards ABC", authorization.ShardsMetadata("ABC", "")[0], conv.CasbinSchema("ABC", ""), true},
		{"Regex objects ABC", authorization.Objects("ABC", "", ""), conv.CasbinData("ABC", "*", "*"), true},
		{"Regex objects ABCD mismatch", authorization.Objects("ABCD", "", ""), conv.CasbinData("ABC", "*", "*"), false},
		{"Regex objects ABCD wildcard", authorization.Objects("ABCD", "", ""), conv.CasbinData("ABC.*", "*", "*"), true},
		{"Regex objects BCD mismatch", authorization.Objects("BCD", "", ""), conv.CasbinData("ABC", "*", "*"), false},
		{"Regex tenant ABC", authorization.ShardsMetadata("ABC", "")[0], conv.CasbinSchema("ABC", ""), true},

		{"Regex collections ABC wildcard", authorization.CollectionsMetadata("ABC")[0], conv.CasbinSchema("ABC*", "#"), true},
		{"Regex collections ABC wildcard 2", authorization.CollectionsMetadata("ABC")[0], conv.CasbinSchema("ABC*", "#"), true},
		{"Regex collections ABCD wildcard", authorization.CollectionsMetadata("ABCD")[0], conv.CasbinSchema("ABC*", "#"), true},

		// ShardsMetadata read on collections level permissions
		{"ShardsMetadata read on collections level ABC", authorization.ShardsMetadata("ABC")[0], conv.CasbinSchema("ABC", ""), true},

		// some other cases
		{"Mismatched collection", authorization.CollectionsMetadata("Class1")[0], conv.CasbinSchema("Class2", "#"), false},
		{"Mismatched shard", authorization.ShardsMetadata("Class1", "Shard1")[0], conv.CasbinSchema("Class1", "Shard2"), false},
		{"Partial match role", authorization.Roles("anotherRole")[0], conv.CasbinRoles("ro*"), false},
		{"Partial match role", authorization.Roles("role")[0], conv.CasbinRoles("ro*"), true},
		{"Partial match collection", authorization.CollectionsMetadata("Class1")[0], conv.CasbinSchema("Cla*", "#"), true},
		{"Partial match shard", authorization.ShardsMetadata("Class1", "Shard1")[0], conv.CasbinSchema("Class1", "Sha*"), true},
		{"Partial match object", authorization.Objects("Class1", "Shard1", "Object1"), conv.CasbinData("Class1", "Shard1", "Obj*"), true},
		{"Special character mismatch", authorization.Objects("Class1", "Shard1", "Object1"), "data/collections/Class1/shards/Shard1/objects/Object1!", false},
		{"Mismatched object", authorization.Objects("Class1", "Shard1", "Object1"), conv.CasbinData("Class1", "Shard1", "Object2"), false},
		{"Mismatched tenant", authorization.ShardsMetadata("Class1", "Tenant1")[0], conv.CasbinSchema("Class1", "Tenant2"), false},

		{"Collection check vs all shards", authorization.CollectionsMetadata("Class1")[0], conv.CasbinSchema("Class1", "*"), false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			testKeyMatch5(t, tt.key1, tt.key2, tt.expected)
		})
	}
}
+
+func TestKeyMatchTenant(t *testing.T) {
+ tests := []struct {
+ name string
+ key1 string
+ key2 string
+ expected bool
+ }{
+ // Tenant specific patterns
+ {
+ "Tenant specific path should not match wildcard",
+ "schema/collections/Class1/shards/#",
+ "schema/collections/Class1/shards/.*",
+ false,
+ },
+ {
+ "Tenant specific path should match exact #",
+ "schema/collections/Class1/shards/#",
+ "schema/collections/Class1/shards/#",
+ true,
+ },
+ {
+ "Regular shard should match wildcard",
+ "schema/collections/Class1/shards/shard-1",
+ "schema/collections/Class1/shards/.*",
+ true,
+ },
+ {
+ "Regular shard should not match tenant specific",
+ "schema/collections/Class1/shards/shard-1",
+ "schema/collections/Class1/shards/#",
+ false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := WeaviateMatcher(tt.key1, tt.key2)
+ if result != tt.expected {
+ t.Errorf("WeaviateMatcher(%s, %s) = %v; want %v", tt.key1, tt.key2, result, tt.expected)
+ }
+ })
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/rbacconf/config.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/rbacconf/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1fba84e3083dc446263eb20b89717a2fc107845
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/rbacconf/config.go
@@ -0,0 +1,30 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbacconf
+
// Config holds the RBAC authorization settings: whether RBAC is enabled and
// which users/groups are bound to the built-in root, admin, viewer and
// read-only roles at startup.
type Config struct {
	// Enabled toggles RBAC authorization as a whole.
	Enabled bool `json:"enabled" yaml:"enabled"`
	// RootUsers are assigned the built-in root role on startup.
	RootUsers []string `json:"root_users" yaml:"root_users"`
	// RootGroups are assigned the built-in root role on startup.
	RootGroups []string `json:"root_groups" yaml:"root_groups"`
	// ReadOnlyGroups are assigned the built-in read-only role on startup.
	ReadOnlyGroups []string `json:"readonly_groups" yaml:"readonly_groups"`
	// ViewerUsers are assigned the built-in viewer role on startup.
	ViewerUsers []string `json:"viewer_users" yaml:"viewer_users"`
	// AdminUsers are assigned the built-in admin role on startup.
	AdminUsers []string `json:"admin_users" yaml:"admin_users"`
	// IpInAuditDisabled presumably disables recording client IPs in audit
	// logs — not referenced in this package; confirm semantics at the caller.
	IpInAuditDisabled bool `json:"ip_in_audit" yaml:"ip_in_audit"`
}
+
// Validate checks the RBAC config for viability; it can be called from the
// central config package. There are currently no constraints to enforce, so
// it always returns nil.
func (c Config) Validate() error {
	return nil
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades.go
new file mode 100644
index 0000000000000000000000000000000000000000..f33e60d5f9a12eb23323129d6fa00efb7806267d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades.go
@@ -0,0 +1,105 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/weaviate/weaviate/usecases/auth/authentication"
+
+ "github.com/casbin/casbin/v2"
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
+func upgradeGroupingsFrom129(enforcer *casbin.SyncedCachedEnforcer, authNconf config.Authentication) error {
+ // clear out assignments without namespaces and re-add them with namespaces
+ roles, _ := enforcer.GetAllSubjects()
+ for _, role := range roles {
+ users, err := enforcer.GetUsersForRole(role)
+ if err != nil {
+ return err
+ }
+
+ for _, user := range users {
+ // internal user assignments (for empty roles) need to be converted from namespaced assignment to db-user only
+ // other assignments need to be converted to both namespaces
+ if strings.Contains(user, conv.InternalPlaceHolder) {
+ if _, err := enforcer.DeleteRoleForUser(user, role); err != nil {
+ return err
+ }
+
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(conv.InternalPlaceHolder, authentication.AuthTypeDb), role); err != nil {
+ return err
+ }
+ } else if strings.HasPrefix(user, "user:") {
+ userNoPrefix := strings.TrimPrefix(user, "user:")
+ if _, err := enforcer.DeleteRoleForUser(user, role); err != nil {
+ return err
+ }
+ if authNconf.APIKey.Enabled && slices.Contains(authNconf.APIKey.Users, userNoPrefix) {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(userNoPrefix, authentication.AuthTypeDb), role); err != nil {
+ return err
+ }
+ }
+ if authNconf.OIDC.Enabled {
+ if _, err := enforcer.AddRoleForUser(conv.UserNameWithTypeFromId(userNoPrefix, authentication.AuthTypeOIDC), role); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ }
+ return nil
+}
+
+func upgradePoliciesFrom129(enforcer *casbin.SyncedCachedEnforcer, keepBuildInRoles bool) error {
+ policies, err := enforcer.GetPolicy()
+ if err != nil {
+ return err
+ }
+
+ // a role can have multiple policies, so first all old role need to be removed and then re-added
+ policiesToAdd := make([][]string, 0, len(policies))
+ for _, policy := range policies {
+ if _, err := enforcer.RemoveFilteredNamedPolicy("p", 0, policy[0]); err != nil {
+ return err
+ }
+
+ if policy[3] == authorization.UsersDomain && policy[2] == authorization.UPDATE {
+ policy[2] = authorization.USER_AND_GROUP_ASSIGN_AND_REVOKE
+ }
+
+ policiesToAdd = append(policiesToAdd, policy)
+ }
+
+ for _, policy := range policiesToAdd {
+ roleName := conv.TrimRoleNamePrefix(policy[0])
+ if _, ok := conv.BuiltInPolicies[roleName]; ok {
+ if !keepBuildInRoles {
+ continue
+ } else if policy[2] == conv.CRUD {
+ policy[2] = conv.VALID_VERBS
+ }
+ }
+
+ if _, err := enforcer.AddNamedPolicy("p", policy[0], policy[1], policy[2], policy[3]); err != nil {
+ return fmt.Errorf("readd policy: %w", err)
+ }
+ }
+
+ return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades_test.go b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a126179e0e94d25f596ae1f93718b7f885a6bc8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/auth/authorization/rbac/up_down_grades_test.go
@@ -0,0 +1,176 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rbac
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "slices"
+ "testing"
+
+ "github.com/casbin/casbin/v2"
+ "github.com/casbin/casbin/v2/model"
+ fileadapter "github.com/casbin/casbin/v2/persist/file-adapter"
+ "github.com/stretchr/testify/require"
+
+ "github.com/weaviate/weaviate/usecases/auth/authorization/conv"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
// TestUpdateGroupings verifies upgradeGroupingsFrom129: legacy "user:"
// assignments are re-namespaced per enabled auth scheme ("db:" for
// configured API-key users, "oidc:" when OIDC is enabled), and the internal
// placeholder user is migrated to the db namespace only.
func TestUpdateGroupings(t *testing.T) {
	tests := []struct {
		name               string
		rolesToAdd         []string
		assignments        map[string]string // legacy user -> role, pre-migration
		expectedAfterWards map[string]string // namespaced user -> role, post-migration
		authNconf          config.Authentication
	}{
		{
			name:               "only internal - will only be added as db",
			rolesToAdd:         []string{"role:test"},
			assignments:        map[string]string{"user:" + conv.InternalPlaceHolder: "role:test"},
			expectedAfterWards: map[string]string{"db:" + conv.InternalPlaceHolder: "role:test"},
			authNconf:          config.Authentication{OIDC: config.OIDC{Enabled: true}},
		},
		{
			name:               "only oidc enabled - normal user will only be added as oidc",
			rolesToAdd:         []string{"role:test"},
			assignments:        map[string]string{"user:something": "role:test"},
			expectedAfterWards: map[string]string{"oidc:something": "role:test"},
			authNconf:          config.Authentication{OIDC: config.OIDC{Enabled: true}},
		},
		{
			name:               "only db enabled - normal user will only be added as db",
			rolesToAdd:         []string{"role:test"},
			assignments:        map[string]string{"user:something": "role:test"},
			expectedAfterWards: map[string]string{"db:something": "role:test"},
			authNconf:          config.Authentication{APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"something"}}},
		},
		{
			name:               "both enabled - normal user will be added for both",
			rolesToAdd:         []string{"role:test"},
			assignments:        map[string]string{"user:something": "role:test"},
			expectedAfterWards: map[string]string{"db:something": "role:test", "oidc:something": "role:test"},
			authNconf:          config.Authentication{APIKey: config.StaticAPIKey{Enabled: true, Users: []string{"something"}}, OIDC: config.OIDC{Enabled: true}},
		},
		{
			name:               "both enabled but user is not added to api key list- normal user will be added for both",
			rolesToAdd:         []string{"role:test"},
			assignments:        map[string]string{"user:something": "role:test"},
			expectedAfterWards: map[string]string{"oidc:something": "role:test"},
			authNconf:          config.Authentication{APIKey: config.StaticAPIKey{Enabled: true}, OIDC: config.OIDC{Enabled: true}},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m, err := model.NewModelFromString(MODEL)
			require.NoError(t, err)

			enforcer, err := casbin.NewSyncedCachedEnforcer(m)
			require.NoError(t, err)

			// seed the enforcer with the legacy roles and assignments
			for _, role := range tt.rolesToAdd {
				_, err := enforcer.AddNamedPolicy("p", role, "*", "R", "*")
				require.NoError(t, err)

			}

			for user, role := range tt.assignments {
				_, err := enforcer.AddRoleForUser(user, role)
				require.NoError(t, err)
			}

			// migrate and check only the expected namespaced assignments exist
			require.NoError(t, upgradeGroupingsFrom129(enforcer, tt.authNconf))
			roles, _ := enforcer.GetAllSubjects()
			require.Len(t, roles, len(tt.rolesToAdd))
			for user, role := range tt.expectedAfterWards {
				users, err := enforcer.GetUsersForRole(role)
				require.NoError(t, err)
				require.True(t, slices.Contains(users, user))
			}
		})
	}
}
+
+func TestUpgradeRoles(t *testing.T) {
+ tests := []struct {
+ name string
+ lines []string
+ expectedPolicies [][]string
+ version string
+ }{
+ {
+ name: "skip build in roles",
+ lines: []string{
+ "p, role:other_role, data/collections/.*/shards/.*/objects/.*, R, data",
+ "p, role:viewer, *, R, *",
+ },
+ expectedPolicies: [][]string{
+ {"role:other_role", "data/collections/.*/shards/.*/objects/.*", " R", "data"},
+ },
+ },
+ {
+ name: "upgrade update user if coming from old version",
+ lines: []string{
+ "p, role:some_role, users/.*, U, users",
+ },
+ expectedPolicies: [][]string{
+ {"p, role:some_role, users/.*, A, users"},
+ },
+ },
+ {
+ name: "mixed upgrade and not upgrade and skip",
+ lines: []string{
+ "p, role:some_role, users/.*, U, users",
+ "p, role:other_role, data/collections/.*/shards/.*/objects/.*, R, data",
+ "p, role:viewer, *, R, *",
+ },
+ expectedPolicies: [][]string{
+ {"p, role:some_role, users/.*, A, users, "},
+ {"role:other_role", "data/collections/.*/shards/.*/objects/.*", " R", "data"},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ path := t.TempDir()
+ tmpFile, err := os.CreateTemp(path, "upgrade-temp-*.tmp")
+ require.NoError(t, err)
+
+ writer := bufio.NewWriter(tmpFile)
+ for _, line := range tt.lines {
+ _, err := fmt.Fprintln(writer, line)
+ require.NoError(t, err)
+ }
+ require.NoError(t, writer.Flush())
+ require.NoError(t, tmpFile.Sync())
+ require.NoError(t, tmpFile.Close())
+
+ m, err := model.NewModelFromString(MODEL)
+ require.NoError(t, err)
+
+ enforcer, err := casbin.NewSyncedCachedEnforcer(m)
+ require.NoError(t, err)
+ enforcer.SetAdapter(fileadapter.NewAdapter(tmpFile.Name()))
+ require.NoError(t, enforcer.LoadPolicy())
+
+ require.NoError(t, upgradePoliciesFrom129(enforcer, false))
+ policies, _ := enforcer.GetPolicy()
+ require.Len(t, policies, len(tt.expectedPolicies))
+ for i, policy := range tt.expectedPolicies {
+ require.Equal(t, tt.expectedPolicies[i], policy)
+ }
+ })
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/chunk-1.tar.gz b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/chunk-1.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a87b96ab797f696a6fec621159881e379194f564
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/chunk-1.tar.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c9c7916275093514a2ff0552f86b7bf975ad8e5a13ad27aece239a28dcc7fac
+size 404
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.hnsw.commitlog.d/1689867524 b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.hnsw.commitlog.d/1689867524
new file mode 100644
index 0000000000000000000000000000000000000000..ecb84a6d6396684101bc231fd45b138a9a13ade5
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.hnsw.commitlog.d/1689867524 differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.indexcount b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.indexcount
new file mode 100644
index 0000000000000000000000000000000000000000..71c2a58453e27ee89ad7d15b1a5f12433409fff4
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.indexcount differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.proplengths b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.proplengths
new file mode 100644
index 0000000000000000000000000000000000000000..325d412964392adc662ab24c38eef659dbbf462e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.proplengths
@@ -0,0 +1 @@
+{"BucketedData":{"title":{"7":2},"title2":{"7":2}},"SumData":{"title":16,"title2":16},"CountData":{"title":2,"title2":2}}
\ No newline at end of file
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.version b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.version
new file mode 100644
index 0000000000000000000000000000000000000000..5407bf3ddf8b5ca61b411342fe54921a2bbb0ec2
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX.version differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..9060aba37881aca278c10af00cc127597927e977
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.db
new file mode 100644
index 0000000000000000000000000000000000000000..a229e7963465646eeaee7e87603688b8f1a78a80
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..33371fdcabffa3203a7e6b5009ad5ec4d415242e
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.cna b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.cna
new file mode 100644
index 0000000000000000000000000000000000000000..cfe4db1afe7b037711ca68f2c2f20954921de6e4
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.cna differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.db
new file mode 100644
index 0000000000000000000000000000000000000000..1f1cfa73b7c2a302b3a2eb16739e822fe09d2c08
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.secondary.0.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.secondary.0.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..e82a308761ececdd675457e46839bd55ec872663
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.secondary.0.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..7f84ca5cd700ed830fd997863a1a734a7ca7a6b4
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.db
new file mode 100644
index 0000000000000000000000000000000000000000..d5f826f83469e658583077c12b0c2b89de6b52fd
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..48e95c2bf79ecf51efc54d8d3d3dba523eb7c9cf
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.db
new file mode 100644
index 0000000000000000000000000000000000000000..4887e976f439610d9682f3896f44e1bb93dbc623
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..48e95c2bf79ecf51efc54d8d3d3dba523eb7c9cf
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.db
new file mode 100644
index 0000000000000000000000000000000000000000..4887e976f439610d9682f3896f44e1bb93dbc623
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..48e95c2bf79ecf51efc54d8d3d3dba523eb7c9cf
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.db
new file mode 100644
index 0000000000000000000000000000000000000000..90f65683bf3ad2cff1f73f2e8d75767eb4463670
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..48e95c2bf79ecf51efc54d8d3d3dba523eb7c9cf
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.db
new file mode 100644
index 0000000000000000000000000000000000000000..90f65683bf3ad2cff1f73f2e8d75767eb4463670
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.hnsw.commitlog.d/1689867524 b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.hnsw.commitlog.d/1689867524
new file mode 100644
index 0000000000000000000000000000000000000000..8941e88b070ab226073a83f238f1d7d8db2eaa63
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.hnsw.commitlog.d/1689867524 differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.indexcount b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.indexcount
new file mode 100644
index 0000000000000000000000000000000000000000..20d5cb86e6dff1f3684dc229a358a2ea697cecfb
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.indexcount differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.proplengths b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.proplengths
new file mode 100644
index 0000000000000000000000000000000000000000..e4480e3a67be752fea4bbd377d5accd57f9b02b1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.proplengths
@@ -0,0 +1 @@
+{"BucketedData":{"title":{"7":1},"title2":{"7":1}},"SumData":{"title":8,"title2":8},"CountData":{"title":1,"title2":1}}
\ No newline at end of file
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.version b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.version
new file mode 100644
index 0000000000000000000000000000000000000000..5407bf3ddf8b5ca61b411342fe54921a2bbb0ec2
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO.version differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..9060aba37881aca278c10af00cc127597927e977
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.db
new file mode 100644
index 0000000000000000000000000000000000000000..f10ad8fa53b20a017cbf4f54574c1c622371434b
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..626e2cdf8deee117bdcc9bae4fdcf701d09ea3db
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.cna b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.cna
new file mode 100644
index 0000000000000000000000000000000000000000..905daeb2677aa5632f7bd4f79b1dfedbf9cc2f16
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.cna differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.db
new file mode 100644
index 0000000000000000000000000000000000000000..31149688b59e06535072fa3d1a767f6f0734e171
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.secondary.0.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.secondary.0.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..2fb70da5ed1f6114e1c2909d8680f9e0c19faf1f
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.secondary.0.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..b7d1b962d29b424f9cde91ce0115c789ba803708
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.db
new file mode 100644
index 0000000000000000000000000000000000000000..4053da4e8217daec348667962116b75f3dcf06b7
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..20821fac9ace2f3c3682d8d43aa0ce399800abd5
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.db
new file mode 100644
index 0000000000000000000000000000000000000000..f05a3103011b99f8e1e0efe4b4edf68e70bf1a3d
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..20821fac9ace2f3c3682d8d43aa0ce399800abd5
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.db
new file mode 100644
index 0000000000000000000000000000000000000000..f05a3103011b99f8e1e0efe4b4edf68e70bf1a3d
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..20821fac9ace2f3c3682d8d43aa0ce399800abd5
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.db
new file mode 100644
index 0000000000000000000000000000000000000000..6f44e5fb69c876145aeaa82ff851a8a076912eee
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.bloom b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.bloom
new file mode 100644
index 0000000000000000000000000000000000000000..20821fac9ace2f3c3682d8d43aa0ce399800abd5
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.bloom differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.db b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.db
new file mode 100644
index 0000000000000000000000000000000000000000..6f44e5fb69c876145aeaa82ff851a8a076912eee
Binary files /dev/null and b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.db differ
diff --git a/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/files.json b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/files.json
new file mode 100644
index 0000000000000000000000000000000000000000..e49ab69a27c564bc3e3e1f15003b003cf8406b40
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/backup/test_data/node1/files.json
@@ -0,0 +1,42 @@
+[
+ "article_cT9eTErXgmTX.hnsw.commitlog.d/1689867524",
+ "article_cT9eTErXgmTX.indexcount",
+ "article_cT9eTErXgmTX.proplengths",
+ "article_cT9eTErXgmTX.version",
+ "article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.bloom",
+ "article_cT9eTErXgmTX_lsm/dimensions/segment-1689867627555054000.db",
+ "article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.bloom",
+ "article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.cna",
+ "article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.db",
+ "article_cT9eTErXgmTX_lsm/objects/segment-1689867627551445000.secondary.0.bloom",
+ "article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.bloom",
+ "article_cT9eTErXgmTX_lsm/property__id/segment-1689867627561328000.db",
+ "article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.bloom",
+ "article_cT9eTErXgmTX_lsm/property_title/segment-1689867627559163000.db",
+ "article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.bloom",
+ "article_cT9eTErXgmTX_lsm/property_title2/segment-1689867627557281000.db",
+ "article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.bloom",
+ "article_cT9eTErXgmTX_lsm/property_title2_searchable/segment-1689867627564376000.db",
+ "article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.bloom",
+ "article_cT9eTErXgmTX_lsm/property_title_searchable/segment-1689867627563072000.db",
+ "article_uq5WKfRlq2mO.hnsw.commitlog.d/1689867524",
+ "article_uq5WKfRlq2mO.indexcount",
+ "article_uq5WKfRlq2mO.proplengths",
+ "article_uq5WKfRlq2mO.version",
+ "article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.bloom",
+ "article_uq5WKfRlq2mO_lsm/dimensions/segment-1689867537919947000.db",
+ "article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.bloom",
+ "article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.cna",
+ "article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.db",
+ "article_uq5WKfRlq2mO_lsm/objects/segment-1689867537919476000.secondary.0.bloom",
+ "article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.bloom",
+ "article_uq5WKfRlq2mO_lsm/property__id/segment-1689867537919986000.db",
+ "article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.bloom",
+ "article_uq5WKfRlq2mO_lsm/property_title/segment-1689867537919997000.db",
+ "article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.bloom",
+ "article_uq5WKfRlq2mO_lsm/property_title2/segment-1689867537919968000.db",
+ "article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.bloom",
+ "article_uq5WKfRlq2mO_lsm/property_title2_searchable/segment-1689867537920222000.db",
+ "article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.bloom",
+ "article_uq5WKfRlq2mO_lsm/property_title_searchable/segment-1689867537920330000.db"
+]
\ No newline at end of file
diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/classifier_integration_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/classifier_integration_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3d9b2fd6a763fc412d436f3d05bb34c0921102d2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/classifier_integration_test.go
@@ -0,0 +1,336 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+//go:build integrationTest
+
+package classification_integration_test
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ schemaUC "github.com/weaviate/weaviate/usecases/schema"
+ "github.com/weaviate/weaviate/usecases/sharding"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/adapters/repos/db"
+ replicationTypes "github.com/weaviate/weaviate/cluster/replication/types"
+ "github.com/weaviate/weaviate/entities/dto"
+ "github.com/weaviate/weaviate/entities/filters"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+ testhelper "github.com/weaviate/weaviate/test/helper"
+ "github.com/weaviate/weaviate/usecases/auth/authorization/mocks"
+ "github.com/weaviate/weaviate/usecases/classification"
+ "github.com/weaviate/weaviate/usecases/cluster"
+ "github.com/weaviate/weaviate/usecases/memwatch"
+ "github.com/weaviate/weaviate/usecases/objects"
+)
+
+func Test_Classifier_KNN_SaveConsistency(t *testing.T) {
+ dirName := t.TempDir()
+ logger, _ := test.NewNullLogger()
+ var id strfmt.UUID
+
+ shardState := singleShardState()
+ sg := &fakeSchemaGetter{
+ schema: schema.Schema{Objects: &models.Schema{Classes: nil}},
+ shardState: shardState,
+ }
+
+ mockSchemaReader := schemaUC.NewMockSchemaReader(t)
+ mockSchemaReader.EXPECT().Shards(mock.Anything).Return(shardState.AllPhysicalShards(), nil).Maybe()
+ mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error {
+ class := &models.Class{Class: className}
+ return readFunc(class, shardState)
+ }).Maybe()
+ mockSchemaReader.EXPECT().ReadOnlySchema().Return(models.Schema{Classes: nil}).Maybe()
+ mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe()
+ mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t)
+ mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe()
+ mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe()
+ mockNodeSelector := cluster.NewMockNodeSelector(t)
+ mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe()
+ mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe()
+ vrepo, err := db.New(logger, "node1", db.Config{
+ MemtablesFlushDirtyAfter: 60,
+ RootPath: dirName,
+ QueryMaximumResults: 10000,
+ MaxImportGoroutinesFactor: 1,
+ }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(),
+ mockNodeSelector, mockSchemaReader, mockReplicationFSMReader)
+ require.Nil(t, err)
+ vrepo.SetSchemaGetter(sg)
+ require.Nil(t, vrepo.WaitForStartup(context.Background()))
+ migrator := db.NewMigrator(vrepo, logger, "node1")
+
+	// so we can reuse it for follow-up requests, such as checking the status
+ size := 400
+ data := largeTestDataSize(size)
+
+ t.Run("preparations", func(t *testing.T) {
+ t.Run("creating the classes", func(t *testing.T) {
+ for _, c := range testSchema().Objects.Classes {
+ require.Nil(t,
+ migrator.AddClass(context.Background(), c))
+ }
+
+ sg.schema = testSchema()
+ })
+
+ t.Run("importing the training data", func(t *testing.T) {
+ classified := testDataAlreadyClassified()
+ bt := make(objects.BatchObjects, len(classified))
+ for i, elem := range classified {
+ bt[i] = objects.BatchObject{
+ OriginalIndex: i,
+ UUID: elem.ID,
+ Object: elem.Object(),
+ }
+ }
+
+ res, err := vrepo.BatchPutObjects(context.Background(), bt, nil, 0)
+ require.Nil(t, err)
+ for _, elem := range res {
+ require.Nil(t, elem.Err)
+ }
+ })
+
+ t.Run("importing the to be classified data", func(t *testing.T) {
+ bt := make(objects.BatchObjects, size)
+ for i, elem := range data {
+ bt[i] = objects.BatchObject{
+ OriginalIndex: i,
+ UUID: elem.ID,
+ Object: elem.Object(),
+ }
+ }
+ res, err := vrepo.BatchPutObjects(context.Background(), bt, nil, 0)
+ require.Nil(t, err)
+ for _, elem := range res {
+ require.Nil(t, elem.Err)
+ }
+ })
+ })
+
+ t.Run("classification journey", func(t *testing.T) {
+ repo := newFakeClassificationRepo()
+ authorizer := mocks.NewMockAuthorizer()
+ classifier := classification.New(sg, repo, vrepo, authorizer, logger, nil)
+
+ params := models.Classification{
+ Class: "Article",
+ BasedOnProperties: []string{"description"},
+ ClassifyProperties: []string{"exactCategory", "mainCategory"},
+ Settings: map[string]interface{}{
+ "k": json.Number("1"),
+ },
+ }
+
+ t.Run("scheduling a classification", func(t *testing.T) {
+ class, err := classifier.Schedule(context.Background(), nil, params)
+ require.Nil(t, err, "should not error")
+ require.NotNil(t, class)
+
+ assert.Len(t, class.ID, 36, "an id was assigned")
+ id = class.ID
+ })
+
+ t.Run("retrieving the same classification by id", func(t *testing.T) {
+ class, err := classifier.Get(context.Background(), nil, id)
+ require.Nil(t, err)
+ require.NotNil(t, class)
+ assert.Equal(t, id, class.ID)
+ assert.Equal(t, models.ClassificationStatusRunning, class.Status)
+ })
+
+ waitForStatusToNoLongerBeRunning(t, classifier, id)
+
+ t.Run("status is now completed", func(t *testing.T) {
+ class, err := classifier.Get(context.Background(), nil, id)
+ require.Nil(t, err)
+ require.NotNil(t, class)
+ assert.Equal(t, models.ClassificationStatusCompleted, class.Status)
+ assert.Equal(t, int64(400), class.Meta.CountSucceeded)
+ })
+
+ t.Run("verify everything is classified", func(t *testing.T) {
+ filter := filters.LocalFilter{
+ Root: &filters.Clause{
+ Operator: filters.OperatorEqual,
+ On: &filters.Path{
+ Class: "Article",
+ Property: "exactCategory",
+ },
+ Value: &filters.Value{
+ Value: 0,
+ Type: schema.DataTypeInt,
+ },
+ },
+ }
+ res, err := vrepo.Search(context.Background(), dto.GetParams{
+ ClassName: "Article",
+ Filters: &filter,
+ Pagination: &filters.Pagination{
+ Limit: 10000,
+ },
+ })
+
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(res))
+ })
+ })
+}
+
+func Test_Classifier_ZeroShot_SaveConsistency(t *testing.T) {
+ t.Skip()
+ dirName := t.TempDir()
+
+ logger, _ := test.NewNullLogger()
+ var id strfmt.UUID
+
+ shardState := singleShardState()
+ sg := &fakeSchemaGetter{shardState: shardState}
+
+ mockSchemaReader := schemaUC.NewMockSchemaReader(t)
+ mockSchemaReader.EXPECT().Read(mock.Anything, mock.Anything).RunAndReturn(func(className string, readFunc func(*models.Class, *sharding.State) error) error {
+ class := &models.Class{Class: className}
+ return readFunc(class, shardState)
+ }).Maybe()
+ mockSchemaReader.EXPECT().ShardReplicas(mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe()
+ mockReplicationFSMReader := replicationTypes.NewMockReplicationFSMReader(t)
+ mockReplicationFSMReader.EXPECT().FilterOneShardReplicasRead(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}).Maybe()
+ mockReplicationFSMReader.EXPECT().FilterOneShardReplicasWrite(mock.Anything, mock.Anything, mock.Anything).Return([]string{"node1"}, nil).Maybe()
+ mockNodeSelector := cluster.NewMockNodeSelector(t)
+ mockNodeSelector.EXPECT().LocalName().Return("node1").Maybe()
+ mockNodeSelector.EXPECT().NodeHostname(mock.Anything).Return("node1", true).Maybe()
+ vrepo, err := db.New(logger, "node1", db.Config{
+ RootPath: dirName,
+ QueryMaximumResults: 10000,
+ MaxImportGoroutinesFactor: 1,
+ }, &fakeRemoteClient{}, &fakeNodeResolver{}, &fakeRemoteNodeClient{}, &fakeReplicationClient{}, nil, memwatch.NewDummyMonitor(),
+ mockNodeSelector, mockSchemaReader, mockReplicationFSMReader)
+ require.Nil(t, err)
+ vrepo.SetSchemaGetter(sg)
+ require.Nil(t, vrepo.WaitForStartup(context.Background()))
+ migrator := db.NewMigrator(vrepo, logger, "node1")
+
+ t.Run("preparations", func(t *testing.T) {
+ t.Run("creating the classes", func(t *testing.T) {
+ for _, c := range testSchemaForZeroShot().Objects.Classes {
+ require.Nil(t,
+ migrator.AddClass(context.Background(), c))
+ }
+
+ sg.schema = testSchemaForZeroShot()
+ })
+
+ t.Run("importing the training data", func(t *testing.T) {
+ classified := testDataZeroShotUnclassified()
+ bt := make(objects.BatchObjects, len(classified))
+ for i, elem := range classified {
+ bt[i] = objects.BatchObject{
+ OriginalIndex: i,
+ UUID: elem.ID,
+ Object: elem.Object(),
+ }
+ }
+
+ res, err := vrepo.BatchPutObjects(context.Background(), bt, nil, 0)
+ require.Nil(t, err)
+ for _, elem := range res {
+ require.Nil(t, elem.Err)
+ }
+ })
+ })
+
+ t.Run("classification journey", func(t *testing.T) {
+ repo := newFakeClassificationRepo()
+ authorizer := mocks.NewMockAuthorizer()
+ classifier := classification.New(sg, repo, vrepo, authorizer, logger, nil)
+
+ params := models.Classification{
+ Class: "Recipes",
+ BasedOnProperties: []string{"text"},
+ ClassifyProperties: []string{"ofFoodType"},
+ Type: "zeroshot",
+ }
+
+ t.Run("scheduling a classification", func(t *testing.T) {
+ class, err := classifier.Schedule(context.Background(), nil, params)
+ require.Nil(t, err, "should not error")
+ require.NotNil(t, class)
+
+ assert.Len(t, class.ID, 36, "an id was assigned")
+ id = class.ID
+ })
+
+ t.Run("retrieving the same classification by id", func(t *testing.T) {
+ class, err := classifier.Get(context.Background(), nil, id)
+ require.Nil(t, err)
+ require.NotNil(t, class)
+ assert.Equal(t, id, class.ID)
+ assert.Equal(t, models.ClassificationStatusRunning, class.Status)
+ })
+
+ waitForStatusToNoLongerBeRunning(t, classifier, id)
+
+ t.Run("status is now completed", func(t *testing.T) {
+ class, err := classifier.Get(context.Background(), nil, id)
+ require.Nil(t, err)
+ require.NotNil(t, class)
+ assert.Equal(t, models.ClassificationStatusCompleted, class.Status)
+ assert.Equal(t, int64(2), class.Meta.CountSucceeded)
+ })
+
+ t.Run("verify everything is classified", func(t *testing.T) {
+ filter := filters.LocalFilter{
+ Root: &filters.Clause{
+ Operator: filters.OperatorEqual,
+ On: &filters.Path{
+ Class: "Recipes",
+ Property: "ofFoodType",
+ },
+ Value: &filters.Value{
+ Value: 0,
+ Type: schema.DataTypeInt,
+ },
+ },
+ }
+ res, err := vrepo.Search(context.Background(), dto.GetParams{
+ ClassName: "Recipes",
+ Filters: &filter,
+ Pagination: &filters.Pagination{
+ Limit: 100000,
+ },
+ })
+
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(res))
+ })
+ })
+}
+
+func waitForStatusToNoLongerBeRunning(t *testing.T, classifier *classification.Classifier, id strfmt.UUID) {
+ testhelper.AssertEventuallyEqualWithFrequencyAndTimeout(t, true, func() interface{} {
+ class, err := classifier.Get(context.Background(), nil, id)
+ require.Nil(t, err)
+ require.NotNil(t, class)
+
+ return class.Status != models.ClassificationStatusRunning
+ }, 100*time.Millisecond, 20*time.Second, "wait until status in no longer running")
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/fakes_for_integration_test.go b/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/fakes_for_integration_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f380161f4067a8c17f31537839aeddf87d09b10
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/classification/integrationtest/fakes_for_integration_test.go
@@ -0,0 +1,654 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+//go:build integrationTest
+// +build integrationTest
+
+package classification_integration_test
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/google/uuid"
+ "github.com/weaviate/weaviate/cluster/router/types"
+ "github.com/weaviate/weaviate/entities/additional"
+ "github.com/weaviate/weaviate/entities/aggregation"
+ "github.com/weaviate/weaviate/entities/dto"
+ "github.com/weaviate/weaviate/entities/filters"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/entities/search"
+ "github.com/weaviate/weaviate/entities/searchparams"
+ "github.com/weaviate/weaviate/entities/storobj"
+ enthnsw "github.com/weaviate/weaviate/entities/vectorindex/hnsw"
+ "github.com/weaviate/weaviate/usecases/cluster/mocks"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/file"
+ "github.com/weaviate/weaviate/usecases/objects"
+ "github.com/weaviate/weaviate/usecases/replica"
+ "github.com/weaviate/weaviate/usecases/replica/hashtree"
+ "github.com/weaviate/weaviate/usecases/sharding"
+ shardingConfig "github.com/weaviate/weaviate/usecases/sharding/config"
+)
+
+type fakeSchemaGetter struct {
+ schema schema.Schema
+ shardState *sharding.State
+}
+
+func (f *fakeSchemaGetter) GetSchemaSkipAuth() schema.Schema {
+ return f.schema
+}
+
+func (f *fakeSchemaGetter) ReadOnlyClass(className string) *models.Class {
+ return f.schema.GetClass(className)
+}
+
+func (f *fakeSchemaGetter) ResolveAlias(string) string {
+ return ""
+}
+
+func (f *fakeSchemaGetter) GetAliasesForClass(string) []*models.Alias {
+ return nil
+}
+
+func (f *fakeSchemaGetter) CopyShardingState(class string) *sharding.State {
+ return f.shardState
+}
+
+func (f *fakeSchemaGetter) ShardOwner(class, shard string) (string, error) {
+ ss := f.shardState
+ x, ok := ss.Physical[shard]
+ if !ok {
+ return "", fmt.Errorf("shard not found")
+ }
+ if len(x.BelongsToNodes) < 1 || x.BelongsToNodes[0] == "" {
+ return "", fmt.Errorf("owner node not found")
+ }
+ return ss.Physical[shard].BelongsToNodes[0], nil
+}
+
+func (f *fakeSchemaGetter) ShardReplicas(class, shard string) ([]string, error) {
+ ss := f.shardState
+ x, ok := ss.Physical[shard]
+ if !ok {
+ return nil, fmt.Errorf("shard not found")
+ }
+ return x.BelongsToNodes, nil
+}
+
+func (f *fakeSchemaGetter) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) {
+ res := map[string]string{}
+ for _, t := range tenants {
+ res[t] = models.TenantActivityStatusHOT
+ }
+ return res, nil
+}
+
+func (f *fakeSchemaGetter) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) {
+ res := map[string]string{}
+ res[tenant] = models.TenantActivityStatusHOT
+ return res, nil
+}
+
+func (f *fakeSchemaGetter) ShardFromUUID(class string, uuid []byte) string {
+ ss := f.shardState
+ return ss.Shard("", string(uuid))
+}
+
+func (f *fakeSchemaGetter) Nodes() []string {
+ return []string{"node1"}
+}
+
+func (m *fakeSchemaGetter) NodeName() string {
+ return "node1"
+}
+
+func (m *fakeSchemaGetter) ClusterHealthScore() int {
+ return 0
+}
+
+func (m *fakeSchemaGetter) Statistics() map[string]any {
+ return nil
+}
+
+func (m *fakeSchemaGetter) ResolveParentNodes(_ string, shard string,
+) (map[string]string, error) {
+ return nil, nil
+}
+
+func singleShardState() *sharding.State {
+ config, err := shardingConfig.ParseConfig(nil, 1)
+ if err != nil {
+ panic(err)
+ }
+
+ selector := mocks.NewMockNodeSelector("node1")
+ s, err := sharding.InitState("test-index", config, selector.LocalName(),
+ selector.StorageCandidates(), 1, false)
+ if err != nil {
+ panic(err)
+ }
+
+ return s
+}
+
+type fakeClassificationRepo struct {
+ sync.Mutex
+ db map[strfmt.UUID]models.Classification
+}
+
+func newFakeClassificationRepo() *fakeClassificationRepo {
+ return &fakeClassificationRepo{
+ db: map[strfmt.UUID]models.Classification{},
+ }
+}
+
+func (f *fakeClassificationRepo) Put(ctx context.Context, class models.Classification) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.db[class.ID] = class
+ return nil
+}
+
+func (f *fakeClassificationRepo) Get(ctx context.Context, id strfmt.UUID) (*models.Classification, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ class, ok := f.db[id]
+ if !ok {
+ return nil, nil
+ }
+
+ return &class, nil
+}
+
+func testSchema() schema.Schema {
+ return schema.Schema{
+ Objects: &models.Schema{
+ Classes: []*models.Class{
+ {
+ Class: "ExactCategory",
+ VectorIndexConfig: enthnsw.NewDefaultUserConfig(),
+ InvertedIndexConfig: invertedConfig(),
+ },
+ {
+ Class: "MainCategory",
+ VectorIndexConfig: enthnsw.NewDefaultUserConfig(),
+ InvertedIndexConfig: invertedConfig(),
+ },
+ {
+ Class: "Article",
+ VectorIndexConfig: enthnsw.NewDefaultUserConfig(),
+ InvertedIndexConfig: invertedConfig(),
+ Properties: []*models.Property{
+ {
+ Name: "description",
+ DataType: []string{string(schema.DataTypeText)},
+ },
+ {
+ Name: "name",
+ DataType: schema.DataTypeText.PropString(),
+ Tokenization: models.PropertyTokenizationWhitespace,
+ },
+ {
+ Name: "exactCategory",
+ DataType: []string{"ExactCategory"},
+ },
+ {
+ Name: "mainCategory",
+ DataType: []string{"MainCategory"},
+ },
+ {
+ Name: "categories",
+ DataType: []string{"ExactCategory"},
+ },
+ {
+ Name: "anyCategory",
+ DataType: []string{"MainCategory", "ExactCategory"},
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+// testDataAlreadyClassified is only used for knn-type classification.
+func testDataAlreadyClassified() search.Results {
+ return search.Results{
+ search.Result{
+ ID: "8aeecd06-55a0-462c-9853-81b31a284d80",
+ ClassName: "Article",
+ Vector: []float32{1, 0, 0},
+ Schema: map[string]interface{}{
+ "description": "This article talks about politics",
+ "exactCategory": models.MultipleRef{beaconRef(idCategoryPolitics)},
+ "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)},
+ },
+ },
+ search.Result{
+ ID: "9f4c1847-2567-4de7-8861-34cf47a071ae",
+ ClassName: "Article",
+ Vector: []float32{0, 1, 0},
+ Schema: map[string]interface{}{
+ "description": "This articles talks about society",
+ "exactCategory": models.MultipleRef{beaconRef(idCategorySociety)},
+ "mainCategory": models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)},
+ },
+ },
+ search.Result{
+ ID: "926416ec-8fb1-4e40-ab8c-37b226b3d68e",
+ ClassName: "Article",
+ Vector: []float32{0, 0, 1},
+ Schema: map[string]interface{}{
+ "description": "This article talks about food",
+ "exactCategory": models.MultipleRef{beaconRef(idCategoryFoodAndDrink)},
+ "mainCategory": models.MultipleRef{beaconRef(idMainCategoryFoodAndDrink)},
+ },
+ },
+ }
+}
+
+// testDataZeroShotUnclassified is only used for zeroshot-type classification.
+func testDataZeroShotUnclassified() search.Results {
+ return search.Results{
+ search.Result{
+ ID: "8aeecd06-55a0-462c-9853-81b31a284d80",
+ ClassName: "FoodType",
+ Vector: []float32{1, 0, 0},
+ Schema: map[string]interface{}{
+ "text": "Ice cream",
+ },
+ },
+ search.Result{
+ ID: "9f4c1847-2567-4de7-8861-34cf47a071ae",
+ ClassName: "FoodType",
+ Vector: []float32{0, 1, 0},
+ Schema: map[string]interface{}{
+ "text": "Meat",
+ },
+ },
+ search.Result{
+ ID: "926416ec-8fb1-4e40-ab8c-37b226b3d68e",
+ ClassName: "Recipes",
+ Vector: []float32{0, 0, 1},
+ Schema: map[string]interface{}{
+ "text": "Cut the steak in half and put it into pan",
+ },
+ },
+ search.Result{
+ ID: "926416ec-8fb1-4e40-ab8c-37b226b3d688",
+ ClassName: "Recipes",
+ Vector: []float32{0, 1, 1},
+ Schema: map[string]interface{}{
+ "description": "There are flavors of vanilla, chocolate and strawberry",
+ },
+ },
+ }
+}
+
+func mustUUID() strfmt.UUID {
+ id, err := uuid.NewRandom()
+ if err != nil {
+ panic(err)
+ }
+
+ return strfmt.UUID(id.String())
+}
+
+func largeTestDataSize(size int) search.Results {
+ out := make(search.Results, size)
+
+ for i := range out {
+ out[i] = search.Result{
+ ID: mustUUID(),
+ ClassName: "Article",
+ Vector: []float32{0.02, 0, rand.Float32()},
+ Schema: map[string]interface{}{
+ "description": "does not matter much",
+ },
+ }
+ }
+ return out
+}
+
+func beaconRef(target string) *models.SingleRef {
+ beacon := fmt.Sprintf("weaviate://localhost/%s", target)
+ return &models.SingleRef{Beacon: strfmt.URI(beacon)}
+}
+
+const (
+ idMainCategoryPoliticsAndSociety = "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e"
+ idMainCategoryFoodAndDrink = "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a"
+ idCategoryPolitics = "1b204f16-7da6-44fd-bbd2-8cc4a7414bc3"
+ idCategorySociety = "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2"
+ idCategoryFoodAndDrink = "027b708a-31ca-43ea-9001-88bec864c79c"
+)
+
+func invertedConfig() *models.InvertedIndexConfig {
+ return &models.InvertedIndexConfig{
+ CleanupIntervalSeconds: 60,
+ UsingBlockMaxWAND: config.DefaultUsingBlockMaxWAND,
+ }
+}
+
+func testSchemaForZeroShot() schema.Schema {
+ return schema.Schema{
+ Objects: &models.Schema{
+ Classes: []*models.Class{
+ {
+ Class: "FoodType",
+ VectorIndexConfig: enthnsw.NewDefaultUserConfig(),
+ InvertedIndexConfig: invertedConfig(),
+ Properties: []*models.Property{
+ {
+ Name: "text",
+ DataType: []string{string(schema.DataTypeText)},
+ },
+ },
+ },
+ {
+ Class: "Recipes",
+ VectorIndexConfig: enthnsw.NewDefaultUserConfig(),
+ InvertedIndexConfig: invertedConfig(),
+ Properties: []*models.Property{
+ {
+ Name: "text",
+ DataType: []string{string(schema.DataTypeText)},
+ },
+ {
+ Name: "ofFoodType",
+ DataType: []string{"FoodType"},
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+type fakeRemoteClient struct{}
+
+func (f *fakeRemoteClient) PutObject(ctx context.Context, hostName, indexName,
+ shardName string, obj *storobj.Object, schemaVersion uint64,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) PutFile(ctx context.Context, hostName, indexName,
+ shardName, fileName string, payload io.ReadSeekCloser,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) GetFileMetadata(ctx context.Context, hostName, indexName, shardName, fileName string) (file.FileMetadata, error) {
+ return file.FileMetadata{}, nil
+}
+
+func (f *fakeRemoteClient) GetFile(ctx context.Context, hostName, indexName, shardName, fileName string) (io.ReadCloser, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) PauseFileActivity(ctx context.Context,
+ hostName, indexName, shardName string, schemaVersion uint64,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) ResumeFileActivity(ctx context.Context,
+ hostName, indexName, shardName string,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) ListFiles(ctx context.Context,
+ hostName, indexName, shardName string,
+) ([]string, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) AddAsyncReplicationTargetNode(ctx context.Context, hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride, schemaVersion uint64) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) RemoveAsyncReplicationTargetNode(ctx context.Context, hostName, indexName, shardName string, targetNodeOverride additional.AsyncReplicationTargetNodeOverride) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) GetObject(ctx context.Context, hostName, indexName,
+ shardName string, id strfmt.UUID, props search.SelectProperties,
+ additional additional.Properties,
+) (*storobj.Object, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) FindObject(ctx context.Context, hostName, indexName,
+ shardName string, id strfmt.UUID, props search.SelectProperties,
+ additional additional.Properties,
+) (*storobj.Object, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) OverwriteObjects(ctx context.Context,
+ host, index, shard string, objects []*objects.VObject,
+) ([]types.RepairResponse, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) Exists(ctx context.Context, hostName, indexName,
+ shardName string, id strfmt.UUID,
+) (bool, error) {
+ return false, nil
+}
+
+func (f *fakeRemoteClient) DeleteObject(ctx context.Context, hostName, indexName,
+ shardName string, id strfmt.UUID, deletionTime time.Time, schemaVersion uint64,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) MergeObject(ctx context.Context, hostName, indexName,
+ shardName string, mergeDoc objects.MergeDocument, schemaVersion uint64,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) SearchShard(ctx context.Context, hostName, indexName,
+ shardName string, vector []models.Vector, targetVector []string, distance float32, limit int, filters *filters.LocalFilter,
+ keywordRanking *searchparams.KeywordRanking, sort []filters.Sort,
+ cursor *filters.Cursor, groupBy *searchparams.GroupBy, additional additional.Properties, targetCombination *dto.TargetCombination,
+ properties []string,
+) ([]*storobj.Object, []float32, error) {
+ return nil, nil, nil
+}
+
+func (f *fakeRemoteClient) BatchPutObjects(ctx context.Context, hostName, indexName, shardName string, objs []*storobj.Object, repl *additional.ReplicationProperties, schemaVersion uint64) []error {
+ return nil
+}
+
+func (f *fakeRemoteClient) MultiGetObjects(ctx context.Context, hostName, indexName,
+ shardName string, ids []strfmt.UUID,
+) ([]*storobj.Object, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) BatchAddReferences(ctx context.Context, hostName,
+ indexName, shardName string, refs objects.BatchReferences, schemaVersion uint64,
+) []error {
+ return nil
+}
+
+func (f *fakeRemoteClient) Aggregate(ctx context.Context, hostName, indexName,
+ shardName string, params aggregation.Params,
+) (*aggregation.Result, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) FindUUIDs(ctx context.Context, hostName, indexName, shardName string,
+ filters *filters.LocalFilter,
+) ([]strfmt.UUID, error) {
+ return nil, nil
+}
+
+func (f *fakeRemoteClient) DeleteObjectBatch(ctx context.Context, hostName, indexName, shardName string,
+ uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64,
+) objects.BatchSimpleObjects {
+ return nil
+}
+
+func (f *fakeRemoteClient) GetShardQueueSize(ctx context.Context,
+ hostName, indexName, shardName string,
+) (int64, error) {
+ return 0, nil
+}
+
+func (f *fakeRemoteClient) GetShardStatus(ctx context.Context,
+ hostName, indexName, shardName string,
+) (string, error) {
+ return "", nil
+}
+
+func (f *fakeRemoteClient) UpdateShardStatus(ctx context.Context, hostName, indexName, shardName,
+ targetStatus string, schemaVersion uint64,
+) error {
+ return nil
+}
+
+func (f *fakeRemoteClient) DigestObjects(ctx context.Context,
+ hostName, indexName, shardName string, ids []strfmt.UUID,
+) (result []types.RepairResponse, err error) {
+ return nil, nil
+}
+
+type fakeNodeResolver struct{}
+
+func (f *fakeNodeResolver) AllHostnames() []string {
+ return nil
+}
+
+func (f *fakeNodeResolver) NodeHostname(string) (string, bool) {
+ return "", false
+}
+
+type fakeRemoteNodeClient struct{}
+
+func (f *fakeRemoteNodeClient) GetNodeStatus(ctx context.Context, hostName, className, shardName, output string) (*models.NodeStatus, error) {
+ return &models.NodeStatus{}, nil
+}
+
+func (f *fakeRemoteNodeClient) GetStatistics(ctx context.Context, hostName string) (*models.Statistics, error) {
+ return &models.Statistics{}, nil
+}
+
+type fakeReplicationClient struct{}
+
+var _ replica.Client = (*fakeReplicationClient)(nil)
+
+func (f *fakeReplicationClient) PutObject(ctx context.Context, host, index, shard, requestID string,
+ obj *storobj.Object, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) DeleteObject(ctx context.Context, host, index, shard, requestID string,
+ id strfmt.UUID, deletionTime time.Time, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) PutObjects(ctx context.Context, host, index, shard, requestID string,
+ objs []*storobj.Object, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) MergeObject(ctx context.Context, host, index, shard, requestID string,
+ mergeDoc *objects.MergeDocument, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) DeleteObjects(ctx context.Context, host, index, shard, requestID string,
+ uuids []strfmt.UUID, deletionTime time.Time, dryRun bool, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) AddReferences(ctx context.Context, host, index, shard, requestID string,
+ refs []objects.BatchReference, schemaVersion uint64,
+) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (f *fakeReplicationClient) Commit(ctx context.Context, host, index, shard, requestID string, resp interface{}) error {
+ return nil
+}
+
+func (f *fakeReplicationClient) Abort(ctx context.Context, host, index, shard, requestID string) (replica.SimpleResponse, error) {
+ return replica.SimpleResponse{}, nil
+}
+
+func (c *fakeReplicationClient) Exists(ctx context.Context, host, index,
+ shard string, id strfmt.UUID,
+) (bool, error) {
+ return false, nil
+}
+
+func (f *fakeReplicationClient) FetchObject(_ context.Context, host, index,
+ shard string, id strfmt.UUID, props search.SelectProperties,
+ additional additional.Properties, numRetries int,
+) (replica.Replica, error) {
+ return replica.Replica{}, nil
+}
+
+func (c *fakeReplicationClient) FetchObjects(ctx context.Context, host,
+ index, shard string, ids []strfmt.UUID,
+) ([]replica.Replica, error) {
+ return nil, nil
+}
+
+func (c *fakeReplicationClient) DigestObjects(ctx context.Context,
+ host, index, shard string, ids []strfmt.UUID, numRetries int,
+) (result []types.RepairResponse, err error) {
+ return nil, nil
+}
+
+func (c *fakeReplicationClient) OverwriteObjects(ctx context.Context,
+ host, index, shard string, vobjects []*objects.VObject,
+) ([]types.RepairResponse, error) {
+ return nil, nil
+}
+
+func (c *fakeReplicationClient) FindUUIDs(ctx context.Context, host, index, shard string,
+ filters *filters.LocalFilter,
+) ([]strfmt.UUID, error) {
+ return nil, nil
+}
+
+func (c *fakeReplicationClient) DigestObjectsInRange(ctx context.Context, host, index, shard string,
+ initialUUID, finalUUID strfmt.UUID, limit int,
+) ([]types.RepairResponse, error) {
+ return nil, nil
+}
+
+func (c *fakeReplicationClient) HashTreeLevel(ctx context.Context, host, index, shard string, level int,
+ discriminant *hashtree.Bitset,
+) (digests []hashtree.Digest, err error) {
+ return nil, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/cluster/mocks/node_selector.go b/platform/dbops/binaries/weaviate-src/usecases/cluster/mocks/node_selector.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4fe43d49eaed9993eb45a51dde936145dc2dad5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/cluster/mocks/node_selector.go
@@ -0,0 +1,71 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package mocks
+
+import "sort"
+
+type memberlist struct {
+	// nodes contains only the node names (no hostnames or addresses)
+ nodes []string
+}
+
+func (m memberlist) StorageCandidates() []string {
+ sort.Strings(m.nodes)
+ return m.nodes
+}
+
+func (m memberlist) NonStorageNodes() []string {
+ return []string{}
+}
+
+func (m memberlist) SortCandidates(nodes []string) []string {
+ sort.Strings(nodes)
+ return nodes
+}
+
+func (m memberlist) NodeHostname(name string) (string, bool) {
+ for _, node := range m.nodes {
+ if node == name {
+ return name, true
+ }
+ }
+ return "", false
+}
+
+func (m memberlist) LocalName() string {
+ if len(m.nodes) == 0 {
+ return ""
+ }
+
+ return m.nodes[0]
+}
+
+func (m memberlist) AllHostnames() []string {
+ return m.nodes
+}
+
+func (m memberlist) NodeAddress(name string) string {
+ for _, node := range m.nodes {
+ if node == name {
+ return name
+ }
+ }
+ return ""
+}
+
+func (m memberlist) NodeGRPCPort(name string) (int, error) {
+ return 0, nil
+}
+
+func NewMockNodeSelector(node ...string) memberlist {
+ return memberlist{nodes: node}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/collection_retrieval_strategy.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/collection_retrieval_strategy.go
new file mode 100644
index 0000000000000000000000000000000000000000..8307a72e13f0511199178bb285dc22dadeb7f037
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/collection_retrieval_strategy.go
@@ -0,0 +1,23 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
// CollectionRetrievalStrategy selects how a collection definition is
// retrieved at runtime. The names suggest: always from the leader, always
// from the local node, or locally with a leader fallback on a version
// mismatch — TODO confirm against the consumer of this setting.
type CollectionRetrievalStrategy string

const (
	// LeaderOnly resolves the collection via the leader.
	LeaderOnly CollectionRetrievalStrategy = "LeaderOnly"
	// LocalOnly resolves the collection from the local node only.
	LocalOnly CollectionRetrievalStrategy = "LocalOnly"
	// LeaderOnMismatch presumably falls back to the leader when the local
	// schema version does not match — verify against callers.
	LeaderOnMismatch CollectionRetrievalStrategy = "LeaderOnMismatch"

	// CollectionRetrievalStrategyEnvVariable is the env variable holding the strategy.
	CollectionRetrievalStrategyEnvVariable = "COLLECTION_RETRIEVAL_STRATEGY"
	// CollectionRetrievalStrategyLDKey is the LaunchDarkly flag key for the strategy.
	CollectionRetrievalStrategyLDKey = "collection-retrieval-strategy"
)
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag.go
new file mode 100644
index 0000000000000000000000000000000000000000..58f4758974413a72d16ea951f60be3b8c35a844d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag.go
@@ -0,0 +1,274 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
+import (
+ "os"
+ "strconv"
+ "sync"
+
+ "github.com/launchdarkly/go-sdk-common/v3/ldvalue"
+ ldInterfaces "github.com/launchdarkly/go-server-sdk/v7/interfaces"
+ "github.com/sirupsen/logrus"
+ "github.com/weaviate/weaviate/entities/errors"
+)
+
// SupportedTypes is the generic type grouping that FeatureFlag can handle:
// exactly the four primitive types a LaunchDarkly flag variation can carry
// (see the per-type Variation calls in evaluateLDFlag).
type SupportedTypes interface {
	bool | string | int | float64
}
+
// FeatureFlag is a generic structure that supports reading a value that can be changed by external factors at runtime (for example LD
// integration).
// It is meant to be used for configuration options that can be updated at runtime.
type FeatureFlag[T SupportedTypes] struct {
	// Not embedding the mutex as we don't want to expose the mutex API to users of FeatureFlag.
	// mu guards value against concurrent reads (Get) and writes (env/LD evaluation, LD updates).
	mu sync.RWMutex
	// value is the actual value of the feature flag
	value T
	// key is the LD key of the flag as registered in the platform
	key string

	// ldInteg is nil when no LD integration is configured; the flag then keeps
	// its default/env-derived value.
	ldInteg *LDIntegration
	// onChange is an array of callbacks that will be called on any change of value.
	// The callback will receive the previous value and the new value of the flag.
	// The callbacks are blocking subsequent updates from LD and should therefore return fast and not block.
	onChange []func(oldValue T, newValue T)
	// ldChangeChannel is the channel from which the LD sdk will send update to the flag based on the targeting configured in the SDK and
	// the flag key.
	ldChangeChannel <-chan ldInterfaces.FlagValueChangeEvent
	// logger carries the tool and flag_key fields for all flag-related log lines.
	logger logrus.FieldLogger
}
+
+// NewFeatureFlag returns a new feature flag of type T configured with key as the remote LD flag key (if LD integration is available) and
+// defaultValue used as the default value of the flag.
+func NewFeatureFlag[T SupportedTypes](key string, defaultValue T, ldInteg *LDIntegration, envDefault string, logger logrus.FieldLogger) *FeatureFlag[T] {
+ f := &FeatureFlag[T]{
+ key: key,
+ value: defaultValue,
+ ldInteg: ldInteg,
+ logger: logger.WithFields(logrus.Fields{"tool": "feature_flag", "flag_key": key}),
+ }
+ // If an env is specified let's read it and load it as the default value
+ if envDefault != "" {
+ f.evaluateEnv(envDefault)
+ }
+
+ // If an LD client is available let's use it
+ if f.ldInteg != nil {
+ // use f.value as the default value for LD, this way in case of LD failure it will fallback to that default
+ // this is useful if we have an env variable that changes the default from defaultValue
+ f.evaluateLDFlag(f.value)
+
+ // Start up the change listener to receive update & re-evaluate the flag from LD
+ f.ldChangeChannel = f.ldInteg.ldClient.GetFlagTracker().AddFlagValueChangeListener(f.key, f.ldInteg.ldContext, ldvalue.Null())
+ f.startChangeMonitoring()
+ }
+ f.logger.WithFields(logrus.Fields{
+ "value": f.value,
+ }).Info("feature flag instantiated")
+
+ // Always log feature flag changes
+ f.WithOnChangeCallback(func(oldValue T, newValue T) {
+ f.logger.WithFields(logrus.Fields{
+ "old_value": oldValue,
+ "new_value": newValue,
+ }).Info("flag change detected")
+ })
+ return f
+}
+
// startChangeMonitoring launches a goroutine (via errors.GoWrapper) that
// consumes LD flag-change events from ldChangeChannel until the channel is
// closed, converts each payload to T, swaps it into f.value under the lock,
// and then invokes the registered onChange callbacks outside the lock.
func (f *FeatureFlag[T]) startChangeMonitoring() {
	errors.GoWrapper(func() {
		for event := range f.ldChangeChannel {
			// A null value means LD could not produce a value for this flag;
			// ignore it and keep the currently stored value.
			if event.NewValue.IsNull() {
				f.logger.Warn("LD updated value to null, ignoring and keeping current value")
				continue
			}

			// Read the value from the payload and then cast it back to the expected flag type on our end
			var value interface{}
			switch event.NewValue.Type() {
			case ldvalue.BoolType:
				value = event.NewValue.BoolValue()
			case ldvalue.StringType:
				value = event.NewValue.StringValue()
			case ldvalue.NumberType:
				// We have to check the underlying number type of value (if any) in order to cast to the right number type.
				// Only the static type of f.value is inspected here, not its contents.
				switch any(f.value).(type) {
				case int:
					value = event.NewValue.IntValue()
				case float64:
					value = event.NewValue.Float64Value()
				default:
					f.logger.Warnf("could not parse number update from LD as flag is not a number but instead %T", f.value)
				}
			default:
				// We only hit this case if the flag on the LD side is not configured as a supported type
				f.logger.Warn("could not parse update from LD as flag is not a supported type")
			}

			// If we can cast the value use it. value stays nil when the event
			// type was unsupported above, so the assertion fails and the event
			// is dropped.
			if v, ok := value.(T); ok {
				// Explicitly Lock and Unlock before calling the change callbacks to ensure we don't keep a lock while
				// calling the callbacks
				f.mu.Lock()
				old := f.value
				f.value = v
				f.mu.Unlock()

				// Be explicit in debug mode only
				f.logger.WithFields(logrus.Fields{
					"old_value": old,
					"new_value": value,
				}).Debug("flag change detected")

				// If we have any callbacks registered, call them now.
				// Callbacks run sequentially and block further LD updates.
				if f.onChange != nil {
					for _, onChange := range f.onChange {
						onChange(old, v)
					}
				}
			}
		}
	}, f.logger)
}
+
+func (f *FeatureFlag[T]) evaluateLDFlag(defaultVal T) {
+ // We will read and write the flag
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Lookup which underlying type we're implementing and call the related LD flag evaluation method
+ // then store the result
+ switch any(f.value).(type) {
+ case string:
+ if v, ok := any(defaultVal).(string); !ok {
+ f.logger.Warnf("should not happen, type %T evaluated as string but can't be casted to string", f.value)
+ } else {
+ flag, err := f.ldInteg.ldClient.StringVariation(f.key, f.ldInteg.ldContext, v)
+ if err != nil {
+ f.logger.Warnf("could not evaluate LD flag: %w", err)
+ }
+ f.value = any(flag).(T)
+ }
+ case int:
+ if v, ok := any(defaultVal).(int); !ok {
+ f.logger.Warnf("should not happen, type %T evaluated as int but can't be casted to int", f.value)
+ } else {
+ flag, err := f.ldInteg.ldClient.IntVariation(f.key, f.ldInteg.ldContext, v)
+ if err != nil {
+ f.logger.Warnf("could not evaluate LD flag: %w", err)
+ }
+ f.value = any(flag).(T)
+ }
+ case float64:
+ if v, ok := any(defaultVal).(float64); !ok {
+ f.logger.Warnf("should not happen, type %T evaluated as float64 but can't be casted to float64", f.value)
+ } else {
+ flag, err := f.ldInteg.ldClient.Float64Variation(f.key, f.ldInteg.ldContext, v)
+ if err != nil {
+ f.logger.Warnf("could not evaluate LD flag: %w", err)
+ }
+ f.value = any(flag).(T)
+ }
+ case bool:
+ if v, ok := any(defaultVal).(bool); !ok {
+ f.logger.Warnf("should not happen, type %T evaluated as bool but can't be casted to bool", f.value)
+ } else {
+ flag, err := f.ldInteg.ldClient.BoolVariation(f.key, f.ldInteg.ldContext, v)
+ if err != nil {
+ f.logger.Warnf("could not evaluate LD flag: %w", err)
+ }
+ f.value = any(flag).(T)
+ }
+ default:
+ // This should not happen
+ f.logger.Warnf("unsuported feature flag value type, got %T", f.value)
+ }
+
+ f.logger.WithField("value", f.value).Infof("flag value after LD evaluation")
+}
+
+// Close ensure that all internal ressources are freed
+func (f *FeatureFlag[T]) Close() {
+ if f.ldInteg != nil {
+ f.ldInteg.ldClient.GetFlagTracker().RemoveFlagValueChangeListener(f.ldChangeChannel)
+ }
+}
+
+// evaluate configures the feature flag to read a default value from envVariable.
+// If envVariable is not present in the env the default value will not change
+// If envVariable returns a value that is not convertible to type T the value will not change.
+// Type string is directly casted
+// Type int is parsed with strconv.Atoi
+// Type float64 is parsed with strconv.ParseFloat
+// Type bool is parsed with strconv.ParseBool
+func (f *FeatureFlag[T]) evaluateEnv(envVariable string) *FeatureFlag[T] {
+ val, ok := os.LookupEnv(envVariable)
+ // Return early if the env variable is absent
+ if !ok {
+ return f
+ }
+
+ // We're going to both read and write the value
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ // Parse val into the right type expected by T
+ switch any(f.value).(type) {
+ case string:
+ if v, ok := any(val).(T); !ok {
+ f.logger.Warn("could not cast string from env, ignoring env value")
+ } else {
+ f.value = v
+ }
+ case int:
+ if v, err := strconv.Atoi(val); err != nil {
+ f.logger.Warn("could not parse int from env, ignoring env value: %w", err)
+ } else {
+ f.value = any(v).(T)
+ }
+ case float64:
+ if v, err := strconv.ParseFloat(val, 64); err != nil {
+ f.logger.Warn("could not parse float64 from env, ignoring env value: %w", err)
+ } else {
+ f.value = any(v).(T)
+ }
+ case bool:
+ if v, err := strconv.ParseBool(val); err != nil {
+ f.logger.Warn("could not parse bool from env, ignoring env value: %w", err)
+ } else {
+ f.value = any(v).(T)
+ }
+ default:
+ // This should not happen
+ f.logger.Warnf("unsuported feature flag value type, got %T", f.value)
+ }
+ f.logger.WithField("value", f.value).Infof("flag value after env evaluation")
+ return f
+}
+
// WithOnChangeCallback registers onChange to be called everytime the value of the feature flag is changed at runtime.
// These callbacks are blocking in the loop to update the feature flag, therefore the callback *must not* block.
// NOTE(review): the append is not guarded by mu while the LD monitoring
// goroutine iterates onChange — register all callbacks before the flag is
// shared with other goroutines to stay race-free.
func (f *FeatureFlag[T]) WithOnChangeCallback(onChange func(T, T)) *FeatureFlag[T] {
	f.onChange = append(f.onChange, onChange)
	return f
}
+
+// Get reads the current value of the feature flags and returns it.
+func (f *FeatureFlag[T]) Get() T {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ return f.value
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8345dafc0aba29972523a1c9dc69448c7ddec6a8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/feature_flag_test.go
@@ -0,0 +1,116 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/require"
+ configRuntime "github.com/weaviate/weaviate/usecases/config/runtime"
+)
+
+func TestFeatureFlagNoLD(t *testing.T) {
+ // Test Integer
+ logger, _ := test.NewNullLogger()
+ fInt := configRuntime.NewFeatureFlag("test-feature-flag", 5, nil, "", logger)
+ require.Equal(t, 5, fInt.Get())
+
+ // Test Float
+ fFloat := configRuntime.NewFeatureFlag("test-feature-flag", 5.5, nil, "", logger)
+ require.Equal(t, 5.5, fFloat.Get())
+
+ // Test String
+ fString := configRuntime.NewFeatureFlag("test-feature-flag", "my-super-flag", nil, "", logger)
+ require.Equal(t, "my-super-flag", fString.Get())
+
+ // Test Bool
+ fBool := configRuntime.NewFeatureFlag("test-feature-flag", true, nil, "", logger)
+ require.Equal(t, true, fBool.Get())
+}
+
+func TestFeatureFlagNoLDWithDefaultEnvInt(t *testing.T) {
+ os.Setenv("VALID_INT_FF", "3")
+ os.Setenv("INVALID_INT_FF", "true")
+ logger, _ := test.NewNullLogger()
+
+ fInt := configRuntime.NewFeatureFlag("test-feature-flag", 5, nil, "VALID_INT_FF", logger)
+ require.Equal(t, 3, fInt.Get())
+
+ fInt = configRuntime.NewFeatureFlag("test-feature-flag", 5, nil, "INVALID_INT_FF", logger)
+ require.Equal(t, 5, fInt.Get())
+
+ fInt = configRuntime.NewFeatureFlag("test-feature-flag", 5, nil, "ABSENT", logger)
+ require.Equal(t, 5, fInt.Get())
+}
+
+func TestFeatureFlagNoLDWithDefaultEnvFloat(t *testing.T) {
+ os.Setenv("VALID_FLOAT_FF", "3.3")
+ os.Setenv("VALID_INT_OR_FLOAT_FF", "3")
+ os.Setenv("INVALID_FLOAT_FF", "true")
+ logger, _ := test.NewNullLogger()
+
+ fFloat := configRuntime.NewFeatureFlag("test-feature-flag", 5.5, nil, "VALID_FLOAT_FF", logger)
+ require.Equal(t, 3.3, fFloat.Get())
+
+ fFloat = configRuntime.NewFeatureFlag("test-feature-flag", 5.5, nil, "VALID_INT_OR_FLOAT_FF", logger)
+ require.Equal(t, float64(3), fFloat.Get())
+
+ fFloat = configRuntime.NewFeatureFlag("test-feature-flag", 5.5, nil, "INVALID_FLOAT_FF", logger)
+ require.Equal(t, 5.5, fFloat.Get())
+
+ fFloat = configRuntime.NewFeatureFlag("test-feature-flag", 5.5, nil, "ABSENT", logger)
+ require.Equal(t, 5.5, fFloat.Get())
+}
+
+func TestFeatureFlagNoLDWithDefaultEnvString(t *testing.T) {
+ os.Setenv("VALID_INT_OR_STRING_FF", "3")
+ os.Setenv("VALID_FLOAT_OR_STRING_FF", "3.3")
+ os.Setenv("VALID_BOOL_OR_STRING_FF", "true")
+ os.Setenv("VALID_STRING_FF", "whatever-i-want")
+ logger, _ := test.NewNullLogger()
+
+ fString := configRuntime.NewFeatureFlag("test-feature-flag", "value", nil, "VALID_INT_OR_STRING_FF", logger)
+ require.Equal(t, "3", fString.Get())
+
+ fString = configRuntime.NewFeatureFlag("test-feature-flag", "value", nil, "VALID_FLOAT_OR_STRING_FF", logger)
+ require.Equal(t, "3.3", fString.Get())
+
+ fString = configRuntime.NewFeatureFlag("test-feature-flag", "value", nil, "VALID_BOOL_OR_STRING_FF", logger)
+ require.Equal(t, "true", fString.Get())
+
+ fString = configRuntime.NewFeatureFlag("test-feature-flag", "value", nil, "VALID_STRING_FF", logger)
+ require.Equal(t, "whatever-i-want", fString.Get())
+
+ fString = configRuntime.NewFeatureFlag("test-feature-flag", "value", nil, "ABSENT", logger)
+ require.Equal(t, "value", fString.Get())
+}
+
+func TestFeatureFlagNoLDWithDefaultEnvBool(t *testing.T) {
+ os.Setenv("VALID_BOOL_TRUE_FF", "true")
+ os.Setenv("VALID_BOOL_FALSE_FF", "false")
+ os.Setenv("INVALID_BOOL_FF", "10")
+ logger, _ := test.NewNullLogger()
+
+ fBool := configRuntime.NewFeatureFlag("test-feature-flag", false, nil, "VALID_BOOL_TRUE_FF", logger)
+ require.Equal(t, true, fBool.Get())
+
+ fBool = configRuntime.NewFeatureFlag("test-feature-flag", true, nil, "VALID_BOOL_FALSE_FF", logger)
+ require.Equal(t, false, fBool.Get())
+
+ fBool = configRuntime.NewFeatureFlag("test-feature-flag", true, nil, "INVALID_BOOL_FF", logger)
+ require.Equal(t, true, fBool.Get())
+
+ fBool = configRuntime.NewFeatureFlag("test-feature-flag", true, nil, "ABSENT", logger)
+ require.Equal(t, true, fBool.Get())
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/launch_darkly.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/launch_darkly.go
new file mode 100644
index 0000000000000000000000000000000000000000..94e4409c1e6103303dbd2de4e7e4152c5c2fc50f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/launch_darkly.go
@@ -0,0 +1,102 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
+import (
+
+ // go-sdk-common/v3/ldcontext defines LaunchDarkly's model for contexts
+
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/launchdarkly/go-sdk-common/v3/ldcontext"
+
+ // go-sdk-common/v3/ldmigration defines LaunchDarkly's model for migration feature flags
+ _ "github.com/launchdarkly/go-sdk-common/v3/ldmigration"
+
+ // go-server-sdk/v7 is the main SDK package - here we are aliasing it to "ld"
+ ld "github.com/launchdarkly/go-server-sdk/v7"
+
+ // go-server-sdk/v7/ldcomponents is for advanced configuration options
+ _ "github.com/launchdarkly/go-server-sdk/v7/ldcomponents"
+)
+
// LDIntegration bundles the LaunchDarkly client with the multi-kind context
// (org/cluster/node) this process evaluates flags against. Built by
// ConfigureLDIntegration.
type LDIntegration struct {
	// ldClient is not nil if the LD integration has been successfully configured.
	ldClient *ld.LDClient

	// ldContext is the current context configured for this particular process.
	ldContext ldcontext.Context
}
+
const (
	// WeaviateLDApiKey names the env variable holding the LaunchDarkly SDK key.
	WeaviateLDApiKey = "WEAVIATE_LD_API_KEY"
	// WeaviateLDClusterKey names the env variable holding the cluster identifier.
	WeaviateLDClusterKey = "WEAVIATE_LD_CLUSTER_KEY"
	// WeaviateLDOrgKey names the env variable holding the organization identifier.
	WeaviateLDOrgKey = "WEAVIATE_LD_ORG_KEY"

	// LD context "kinds" used when building the multi-kind context.
	LDContextOrgKey     = "org"
	LDContextClusterKey = "cluster"
	LDContextNodeKey    = "node"
)
+
+// ConfigureLDIntegration will configure the necessary global variables to have `FeatureFlag` struct be able to use LD flags
+func ConfigureLDIntegration() (*LDIntegration, error) {
+ var err error
+
+ // Fetch all the necessary env variable and exit if one fails
+ ldApiKey, ok := os.LookupEnv(WeaviateLDApiKey)
+ if !ok {
+ return nil, fmt.Errorf("could not locate %s env variable", WeaviateLDApiKey)
+ }
+ orgKey, ok := os.LookupEnv(WeaviateLDOrgKey)
+ if !ok {
+ return nil, fmt.Errorf("could not locate %s env variable", WeaviateLDOrgKey)
+ }
+ clusterKey, ok := os.LookupEnv(WeaviateLDClusterKey)
+ if !ok {
+ return nil, fmt.Errorf("could not locate %s env variable", WeaviateLDClusterKey)
+ }
+ // Re-using the current approach to parse the nodeName in the config
+ nodeKey, ok := os.LookupEnv("CLUSTER_HOSTNAME")
+ if !ok || nodeKey == "" {
+ nodeKey, err = os.Hostname()
+ if err != nil {
+ return nil, fmt.Errorf("could not locate CLUSTER_HOSTNAME env variable")
+ }
+ }
+
+ // Instantiate the LD client
+ ldClient, err := ld.MakeClient(ldApiKey, 5*time.Second)
+ if err != nil {
+ return nil, fmt.Errorf("could not instantiate LD Client: %w", err)
+ }
+ // Can happen according to LD SDK docs
+ if ldClient == nil {
+ return nil, fmt.Errorf("LD client instantiation successful but client is nil")
+ }
+
+ // Instantiate the LD context
+ orgContext := ldcontext.NewBuilder(orgKey).Kind(LDContextOrgKey).Build()
+ clusterContext := ldcontext.NewBuilder(clusterKey).Kind(LDContextClusterKey).Build()
+ nodeContext := ldcontext.NewBuilder(nodeKey).Kind(LDContextNodeKey).Build()
+ ldContext, err := ldcontext.NewMultiBuilder().Add(clusterContext).Add(orgContext).Add(nodeContext).TryBuild()
+ if err != nil {
+ return nil, fmt.Errorf("could not instantiate LD context: %w", err)
+ }
+
+ // Success
+ return &LDIntegration{
+ ldClient: ldClient,
+ ldContext: ldContext,
+ }, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f64b8395416c584e2d694a7383766d1632823b6
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager.go
@@ -0,0 +1,182 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
+import (
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/sirupsen/logrus"
+)
+
// Sentinel errors returned by ConfigManager. Match with errors.Is: loadConfig
// joins them with the underlying cause via errors.Join. The read/parse
// messages previously carried a stray trailing space, fixed here.
var (
	ErrEmptyConfig             = errors.New("empty runtime config")
	ErrFailedToOpenConfig      = errors.New("failed to open runtime config")
	ErrFailedToReadConfig      = errors.New("failed to read runtime config")
	ErrFailedToParseConfig     = errors.New("failed to parse runtime config")
	ErrUnregisteredConfigFound = errors.New("unregistered config found")
)
+
// Parser takes care of unmarshaling a config struct
// from given raw bytes (e.g. YAML, JSON, etc.).
type Parser[T any] func([]byte) (*T, error)

// Updater tries to update the `source` config with the newly `parsed` config,
// optionally consulting the named hooks. Returning a non-nil error rejects
// the new config (loadConfig keeps the previous one).
type Updater[T any] func(log logrus.FieldLogger, source, parsed *T, hooks map[string]func() error) error
+
// ConfigManager takes care of periodically loading the config from
// given filepath for every interval period (and on SIGHUP, see loop).
type ConfigManager[T any] struct {
	// path is file path of config to load and unmarshal from
	path string
	// interval is how often config manager trigger loading the config file.
	interval time.Duration

	// parse unmarshals raw file bytes into a *T.
	parse Parser[T]
	// update applies a freshly parsed config onto currentConfig.
	update Updater[T]

	// currentConfig is last successfully loaded config.
	// ConfigManager keep using this config if there are any
	// failures to load new configs.
	currentConfig *T
	// currentHash is the sha256 of the last applied file contents, used to
	// skip reloading an unchanged file.
	currentHash string

	log logrus.FieldLogger
	// lastLoadSuccess is 1 when the most recent load attempt succeeded, 0 otherwise.
	lastLoadSuccess prometheus.Gauge
	// configHash exposes the sha256 of the active config as a metric label.
	configHash *prometheus.GaugeVec

	// exp hooks, forwarded to the Updater on every load.
	hooks map[string]func() error
}
+
+func NewConfigManager[T any](
+ filepath string,
+ parser Parser[T],
+ updater Updater[T],
+ registered *T,
+ interval time.Duration,
+ log logrus.FieldLogger,
+ hooks map[string]func() error,
+ r prometheus.Registerer,
+) (*ConfigManager[T], error) {
+ // catch empty filepath early
+ if len(strings.TrimSpace(filepath)) == 0 {
+ return nil, errors.New("filepath to load runtimeconfig is empty")
+ }
+
+ cm := &ConfigManager[T]{
+ path: filepath,
+ interval: interval,
+ log: log,
+ parse: parser,
+ update: updater,
+ lastLoadSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Name: "weaviate_runtime_config_last_load_success",
+ Help: "Whether the last loading attempt of runtime config was success",
+ }),
+ configHash: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
+ Name: "weaviate_runtime_config_hash",
+ Help: "Hash value of the currently active runtime configuration",
+ }, []string{"sha256"}), // sha256 is type of checksum and hard-coded for now
+ currentConfig: registered,
+ hooks: hooks,
+ }
+
+ // try to load it once to fail early if configs are invalid
+ if err := cm.loadConfig(); err != nil {
+ return nil, err
+ }
+
+ return cm, nil
+}
+
// Run starts the config-manager actor and blocks until the passed ctx is
// cancelled, returning ctx.Err(). Callers typically run it in its own
// goroutine.
func (cm *ConfigManager[T]) Run(ctx context.Context) error {
	return cm.loop(ctx)
}
+
+// loadConfig reads and unmarshal the config from the file location.
+func (cm *ConfigManager[T]) loadConfig() error {
+ f, err := os.Open(cm.path)
+ if err != nil {
+ cm.lastLoadSuccess.Set(0)
+ return errors.Join(ErrFailedToOpenConfig, err)
+ }
+ defer f.Close()
+
+ b, err := io.ReadAll(f)
+ if err != nil {
+ cm.lastLoadSuccess.Set(0)
+ return errors.Join(ErrFailedToReadConfig, err)
+ }
+
+ hash := fmt.Sprintf("%x", sha256.Sum256(b))
+ if hash == cm.currentHash {
+ cm.lastLoadSuccess.Set(1)
+ return nil // same file. no change
+ }
+
+ cfg, err := cm.parse(b)
+ if err != nil {
+ cm.lastLoadSuccess.Set(0)
+ return errors.Join(ErrFailedToParseConfig, err)
+ }
+
+ if err := cm.update(cm.log, cm.currentConfig, cfg, cm.hooks); err != nil {
+ return err
+ }
+
+ cm.lastLoadSuccess.Set(1)
+ cm.configHash.Reset()
+ cm.configHash.WithLabelValues(hash).Set(1)
+ cm.currentHash = hash
+
+ return nil
+}
+
+// loop is a actor loop that runs forever till config manager is stopped.
+// it orchestrates between "loading" configs and "stopping" the config manager
+func (cm *ConfigManager[T]) loop(ctx context.Context) error {
+ ticker := time.NewTicker(cm.interval)
+ defer ticker.Stop()
+
+ // SIGHUP handler to trigger reload
+ sighup := make(chan os.Signal, 1)
+ signal.Notify(sighup, syscall.SIGHUP)
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := cm.loadConfig(); err != nil {
+ cm.log.Errorf("loading runtime config every %s failed, using old config: %v", cm.interval, err)
+ }
+ case <-sighup:
+ if err := cm.loadConfig(); err != nil {
+ cm.log.Error("loading runtime config through SIGHUP failed, using old config: %v", err)
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f97d59eec0e90267c6c5bf0b56c4e0f77373a63
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/manager_test.go
@@ -0,0 +1,433 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+)
+
// testConfig is the minimal runtime-config shape used by these tests; its
// single DynamicValue field exercises reload propagation via Get/SetValue.
type testConfig struct {
	BackupInterval *DynamicValue[time.Duration] `yaml:"backup_interval"`
}
+
+func parseYaml(buf []byte) (*testConfig, error) {
+ var c testConfig
+ dec := yaml.NewDecoder(bytes.NewReader(buf))
+ dec.KnownFields(true)
+
+ if err := dec.Decode(&c); err != nil {
+ return nil, err
+ }
+ return &c, nil
+}
+
+func updater(_ logrus.FieldLogger, source, parsed *testConfig, _ map[string]func() error) error {
+ source.BackupInterval.SetValue(parsed.BackupInterval.Get())
+ return nil
+}
+
+func TestConfigManager_loadConfig(t *testing.T) {
+ log, _ := test.NewNullLogger()
+ registered := &testConfig{
+ BackupInterval: NewDynamicValue(2 * time.Second),
+ }
+
+ t.Run("non-exist config should fail config manager at the startup", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+ _, err := NewConfigManager("non-exist.yaml", parseYaml, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.ErrorIs(t, err, ErrFailedToOpenConfig)
+
+ // assert: config_last_load_success=0 and no metric for config_hash
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
+ # HELP weaviate_runtime_config_last_load_success Whether the last loading attempt of runtime config was success
+ # TYPE weaviate_runtime_config_last_load_success gauge
+ weaviate_runtime_config_last_load_success 0
+ `)))
+ })
+
+ t.Run("invalid config should fail config manager at the startup", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+
+ tmp, err := os.CreateTemp("", "invalid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ _, err = tmp.Write([]byte("backup_interval=10s")) // in-valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ _, err = NewConfigManager(tmp.Name(), parseYaml, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.ErrorIs(t, err, ErrFailedToParseConfig)
+
+ // assert: config_last_load_success=0 and no metric for config_hash
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(`
+ # HELP weaviate_runtime_config_last_load_success Whether the last loading attempt of runtime config was success
+ # TYPE weaviate_runtime_config_last_load_success gauge
+ weaviate_runtime_config_last_load_success 0
+ `)))
+ })
+
+ t.Run("valid config should succeed creating config manager at the startup", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf) // valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ _, err = NewConfigManager(tmp.Name(), parseYaml, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+
+ // assert: config_last_load_success=1 and config_hash should be set.
+ assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
+ # HELP weaviate_runtime_config_hash Hash value of the currently active runtime configuration
+ # TYPE weaviate_runtime_config_hash gauge
+ weaviate_runtime_config_hash{sha256="%s"} 1
+ # HELP weaviate_runtime_config_last_load_success Whether the last loading attempt of runtime config was success
+ # TYPE weaviate_runtime_config_last_load_success gauge
+ weaviate_runtime_config_last_load_success 1
+ `, fmt.Sprintf("%x", sha256.Sum256(buf))))))
+ })
+
+ t.Run("changing config file should reload the config", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+
+ // calling parser => reloading the config
+ loadCount := 0
+ trackedParser := func(buf []byte) (*testConfig, error) {
+ loadCount++
+ return parseYaml(buf)
+ }
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf) // valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ cm, err := NewConfigManager(tmp.Name(), trackedParser, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+
+ // assert: should have called `parser` only once during initial loading.
+ assert.Equal(t, 1, loadCount)
+
+ // Now let's change the config file few times
+ var (
+ wg sync.WaitGroup
+ ctx, cancel = context.WithCancel(context.Background())
+ )
+ defer cancel() // being good citizen.
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cm.Run(ctx)
+ }()
+
+ n := 3 // number of times we change the config file after initial reload
+ writeDelay := 100 * time.Millisecond
+ for i := 0; i < n; i++ {
+ // write different config every time
+ buf := []byte(fmt.Sprintf("backup_interval: %ds", i+1))
+
+ // Writing as two step process to avoid any race between writing and manager reading the file.
+ writeFile(t, tmp.Name(), buf)
+
+ // give enough time to config manager to reload the previously written config
+ // assert: new config_last_load_success=1 and config_hash should be changed as well.
+ assert.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.NoError(c, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
+ # HELP weaviate_runtime_config_hash Hash value of the currently active runtime configuration
+ # TYPE weaviate_runtime_config_hash gauge
+ weaviate_runtime_config_hash{sha256="%s"} 1
+ # HELP weaviate_runtime_config_last_load_success Whether the last loading attempt of runtime config was success
+ # TYPE weaviate_runtime_config_last_load_success gauge
+ weaviate_runtime_config_last_load_success 1
+ `, fmt.Sprintf("%x", sha256.Sum256(buf))))))
+ }, writeDelay, writeDelay/2)
+ }
+
+ // stop the manger
+ cancel()
+ wg.Wait() // config manager should have stopped correctly.
+
+ // assert: changing config should have reloaded configs.
+ assert.Equal(t, n+1, loadCount) // +1 is the initial loading of config.
+ })
+
+ t.Run("injecting new invalid config file should keep using old valid config", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+
+ // calling parser => reloading the config
+ loadCount := 0
+ trackedParser := func(buf []byte) (*testConfig, error) {
+ loadCount++
+ return parseYaml(buf)
+ }
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf) // valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ cm, err := NewConfigManager(tmp.Name(), trackedParser, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+
+ // assert: should have called `parser` only once during initial loading.
+ assert.Equal(t, 1, loadCount)
+
+ // Now let's inject invalid config file
+ var (
+ wg sync.WaitGroup
+ ctx, cancel = context.WithCancel(context.Background())
+ )
+ defer cancel() // being good citizen.
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cm.Run(ctx)
+ }()
+
+ writeDelay := 100 * time.Millisecond
+ // write different config every time
+ xbuf := []byte(`backup_interval=10s`) // invalid yaml
+ err = os.WriteFile(tmp.Name(), xbuf, 0o777)
+ require.NoError(t, err)
+
+ // give enough time to config manager to reload the previously written config
+ // assert: new config_last_load_success=0 and config_hash shouldn't have changed.
+ assert.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.NoError(c, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(`
+ # HELP weaviate_runtime_config_hash Hash value of the currently active runtime configuration
+ # TYPE weaviate_runtime_config_hash gauge
+ weaviate_runtime_config_hash{sha256="%s"} 1
+ # HELP weaviate_runtime_config_last_load_success Whether the last loading attempt of runtime config was success
+ # TYPE weaviate_runtime_config_last_load_success gauge
+ weaviate_runtime_config_last_load_success 0
+ `, fmt.Sprintf("%x", sha256.Sum256(buf)))))) // should have old valid config hash
+ }, writeDelay, writeDelay/2)
+
+ // stop the manger
+ cancel()
+ wg.Wait() // config manager should have stopped correctly.
+
+ // assert: since new config is failing, it should keep re-loading
+ assert.Greater(t, loadCount, 1)
+ })
+ t.Run("unchanged config should not reload", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+
+ // calling parser => reloading the config
+ var loadCount atomic.Int64
+ trackedParser := func(buf []byte) (*testConfig, error) {
+ loadCount.Add(1)
+ return parseYaml(buf)
+ }
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf) // valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ cm, err := NewConfigManager(tmp.Name(), trackedParser, updater, registered, 10*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+
+ // assert: should have called `parser` only once during initial loading.
+ assert.Equal(t, int64(1), loadCount.Load())
+
+ // Now let's change the config file few times
+ var (
+ wg sync.WaitGroup
+ ctx, cancel = context.WithCancel(context.Background())
+ )
+ defer cancel() // being good citizen.
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cm.Run(ctx)
+ }()
+
+ n := 3 // number of times we change the config file after initial reload
+ writeDelay := 100 * time.Millisecond
+ for i := 0; i < n; i++ {
+ // write same content. Writing as two step process to avoid any race between writing and manager reading the file.
+ writeFile(t, tmp.Name(), buf)
+
+ // give enough time to config manager to reload the previously written config
+ time.Sleep(writeDelay)
+ assert.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.Equal(c, int64(1), loadCount.Load())
+ }, writeDelay, writeDelay/2)
+ }
+
+ // stop the manger
+ cancel()
+ wg.Wait() // config manager should have stopped correctly.
+
+ // assert: writing same content shouldn't reload the config
+ assert.Equal(t, int64(1), loadCount.Load()) // 1 is the initial loading of config.
+ })
+}
+
+func TestConfigManager_GetConfig(t *testing.T) {
+ log, _ := test.NewNullLogger()
+
+ t.Run("receiving config should never block if manager is not reloading the config", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+ registered := &testConfig{
+ BackupInterval: NewDynamicValue(2 * time.Second),
+ }
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf) // valid yaml
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ _, err = NewConfigManager(tmp.Name(), parseYaml, updater, registered, 100*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+
+ getConfigWait := make(chan struct{})
+
+ var wg sync.WaitGroup
+
+ // NOTE: we are not loading config anymore.
+
+ n := 100 // 100 goroutine
+ wg.Add(n)
+ for i := 0; i < 100; i++ {
+ go func() {
+ defer wg.Done()
+ <-getConfigWait // wait till all go routines ready to get the config
+ assert.Equal(t, 10*time.Second, registered.BackupInterval.Get())
+ }()
+ }
+
+ close(getConfigWait)
+ wg.Wait()
+ })
+
+ t.Run("should receive latest config after last reload", func(t *testing.T) {
+ reg := prometheus.NewPedanticRegistry()
+ registered := &testConfig{
+ BackupInterval: NewDynamicValue(2 * time.Second),
+ }
+
+ tmp, err := os.CreateTemp("", "valid_config.yaml")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, os.Remove(tmp.Name()))
+ })
+
+ buf := []byte(`backup_interval: 10s`)
+
+ _, err = tmp.Write(buf)
+ require.NoError(t, err)
+ require.NoError(t, tmp.Close())
+
+ cm, err := NewConfigManager(tmp.Name(), parseYaml, updater, registered, 100*time.Millisecond, log, nil, reg)
+ require.NoError(t, err)
+ assertConfig(t, cm, registered, 10*time.Second)
+
+ // change the config
+ buf = []byte(`backup_interval: 20s`)
+ require.NoError(t, os.WriteFile(tmp.Name(), buf, 0o777))
+
+ require.NoError(t, cm.loadConfig()) // loading new config
+ assertConfig(t, cm, registered, 20*time.Second)
+ })
+}
+
+// helpers
+
+func assertConfig(t *testing.T, cm *ConfigManager[testConfig], registered *testConfig, expected time.Duration) {
+ getConfigWait := make(chan struct{})
+
+ var wg sync.WaitGroup
+
+ n := 100 // 100 goroutine
+ wg.Add(n)
+ for i := 0; i < 100; i++ {
+ go func() {
+ defer wg.Done()
+ <-getConfigWait // wait till all go routines ready to get the config
+ assert.Equal(t, expected, registered.BackupInterval.Get())
+ }()
+ }
+
+ close(getConfigWait)
+ wg.Wait()
+}
+
+func writeFile(t *testing.T, path string, buf []byte) {
+ t.Helper()
+
+ tm := fmt.Sprintf("%s.tmp", path)
+ err := os.WriteFile(tm, buf, 0o777)
+ require.NoError(t, err)
+ err = os.Rename(tm, path)
+ require.NoError(t, err)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values.go
new file mode 100644
index 0000000000000000000000000000000000000000..11eccb5cd3b0d5a74fe766ce03e503a5c70b2810
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values.go
@@ -0,0 +1,158 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
+import (
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "gopkg.in/yaml.v3"
+)
+
// DynamicType is the set of value types that are supported in runtime
// (dynamically reloadable) configs.
type DynamicType interface {
	~int | ~float64 | ~bool | time.Duration | ~string | []string
}
+
// DynamicValue represents any runtime config value. Its zero value is
// fully usable.
// If you want a zero value with a different `default`, use the
// `NewDynamicValue` constructor.
type DynamicValue[T DynamicType] struct {
	// val is the dynamically changing value; nil means "not set", in which
	// case Get() falls back to def.
	val *T
	// mu protects val
	mu sync.RWMutex

	// def represents the default value.
	def T
}
+
+// NewDynamicValue returns an instance of DynamicValue as passed in type
+// with passed in value as default.
+func NewDynamicValue[T DynamicType](val T) *DynamicValue[T] {
+ return &DynamicValue[T]{
+ def: val,
+ }
+}
+
+// Get returns a current value for the given config. It can either be dynamic value or default
+// value (if unable to get dynamic value)
+// Consumer of the dynamic config value should care only about this `Get()` api.
+func (dv *DynamicValue[T]) Get() T {
+ // Handle zero-value of `*DynamicValue[T]` without panic.
+ if dv == nil {
+ var zero T
+ return zero
+ }
+
+ dv.mu.RLock()
+ defer dv.mu.RUnlock()
+
+ if dv.val != nil {
+ return *dv.val
+ }
+ return dv.def
+}
+
+// Reset removes the old dynamic value.
+func (dv *DynamicValue[T]) Reset() {
+ if dv == nil {
+ return
+ }
+
+ dv.mu.Lock()
+ defer dv.mu.Unlock()
+
+ dv.val = nil
+}
+
// SetValue is used by the config manager to update the dynamic value.
// A nil receiver is tolerated as a no-op.
func (dv *DynamicValue[T]) SetValue(val T) {
	// NOTE(review): a nil receiver means a caller is setting an uninitialized
	// runtime value; consider logging this at a higher level.
	if dv == nil {
		return
	}

	dv.mu.Lock()
	defer dv.mu.Unlock()

	dv.val = &val

	// NOTE: no need to touch the default here —
	// `Get()` falls back to it whenever the dynamic value is unset.
}
+
+// UnmarshalYAML implements `yaml.v3` custom decoding for `DynamicValue` type.
+func (dv *DynamicValue[T]) UnmarshalYAML(node *yaml.Node) error {
+ var val T
+ if err := node.Decode(&val); err != nil {
+ return err
+ }
+ dv.mu.Lock()
+ defer dv.mu.Unlock()
+
+ dv.def = val
+ return nil
+}
+
+// MarshalYAML implements `yaml.v3` custom encoding for `DynamicValue` type.
+func (dv *DynamicValue[T]) MarshalYAML() (any, error) {
+ dv.mu.Lock()
+ val := dv.def
+ if dv.val != nil {
+ val = *dv.val
+ }
+ dv.mu.Unlock()
+
+ return val, nil
+}
+
+// UnmarshalJSON implements `json` custom decoding for `DynamicValue` type.
+func (dv *DynamicValue[T]) UnmarshalJSON(data []byte) error {
+ var val T
+ if err := json.Unmarshal(data, &val); err != nil {
+ return err
+ }
+ dv.mu.Lock()
+ defer dv.mu.Unlock()
+ dv.def = val
+ return nil
+}
+
+// MarshalJSON implements `json` custom encoding for `DynamicValue` type.
+func (dv *DynamicValue[T]) MarshalJSON() ([]byte, error) {
+ dv.mu.Lock()
+ val := dv.def
+ if dv.val != nil {
+ val = *dv.val
+ }
+ dv.mu.Unlock()
+
+ b, err := json.Marshal(val)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// String implements Stringer interface for `%v` formatting
+// for any dynamic value types.
+func (dv *DynamicValue[T]) String() string {
+ res := dv.val
+ if res == nil {
+ res = &dv.def
+ }
+ return fmt.Sprintf("%v", *res)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values_test.go b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c71ff0b1b342e065c1c79cd148c43fdaf2198bf5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/config/runtime/values_test.go
@@ -0,0 +1,251 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package runtime
+
import (
	"encoding/json"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v3"
)
+
+func TestDynamicValue_YAML(t *testing.T) {
+ t.Run("YAML unmarshal should always set `default` value", func(t *testing.T) {
+ val := struct {
+ Foo *DynamicValue[int] `yaml:"foo"`
+ Bar *DynamicValue[float64] `yaml:"bar"`
+ Alice *DynamicValue[bool] `yaml:"alice"`
+ Dave *DynamicValue[time.Duration] `yaml:"dave"`
+ Status *DynamicValue[string] `yaml:"status"`
+ Slice *DynamicValue[[]string] `yaml:"slice"`
+ }{}
+ buf := `
+foo: 2
+bar: 4.5
+alice: true
+dave: 20s
+status: "done"
+slice: ["one", "two", "three"]
+`
+ dec := yaml.NewDecoder(strings.NewReader(buf))
+ dec.KnownFields(true)
+ err := dec.Decode(&val)
+ require.NoError(t, err)
+
+ assert.Equal(t, 2, val.Foo.def)
+ assert.Equal(t, 2, val.Foo.Get())
+ assert.Nil(t, nil, val.Foo.val)
+
+ assert.Equal(t, 4.5, val.Bar.def)
+ assert.Equal(t, 4.5, val.Bar.Get())
+ assert.Nil(t, val.Bar.val)
+
+ assert.Equal(t, true, val.Alice.def)
+ assert.Equal(t, true, val.Alice.Get())
+ assert.Nil(t, val.Alice.val)
+
+ assert.Equal(t, 20*time.Second, val.Dave.def)
+ assert.Equal(t, 20*time.Second, val.Dave.Get())
+ assert.Nil(t, val.Dave.val)
+
+ assert.Equal(t, "done", val.Status.def)
+ assert.Equal(t, "done", val.Status.Get())
+ assert.Nil(t, val.Status.val)
+
+ assert.Equal(t, []string{"one", "two", "three"}, val.Slice.def)
+ assert.Equal(t, []string{"one", "two", "three"}, val.Slice.Get())
+ assert.Nil(t, val.Slice.val)
+ })
+}
+
+func TestDynamicValue_JSON(t *testing.T) {
+ t.Run("JSON unmarshal should always set `default` value", func(t *testing.T) {
+ val := struct {
+ Foo *DynamicValue[int] `json:"foo"`
+ Bar *DynamicValue[float64] `json:"bar"`
+ Alice *DynamicValue[bool] `json:"alice"`
+ Dave *DynamicValue[time.Duration] `json:"dave"`
+ Status *DynamicValue[string] `json:"status"`
+ Slice *DynamicValue[[]string] `json:"slice"`
+ }{}
+ buf := `
+foo: 2
+bar: 4.5
+alice: true
+dave: 20s
+status: "done"
+slice: ["one", "two", "three"]
+`
+ dec := yaml.NewDecoder(strings.NewReader(buf))
+ dec.KnownFields(true)
+ err := dec.Decode(&val)
+ require.NoError(t, err)
+
+ assert.Equal(t, 2, val.Foo.def)
+ assert.Equal(t, 2, val.Foo.Get())
+ assert.Nil(t, nil, val.Foo.val)
+
+ assert.Equal(t, 4.5, val.Bar.def)
+ assert.Equal(t, 4.5, val.Bar.Get())
+ assert.Nil(t, val.Bar.val)
+
+ assert.Equal(t, true, val.Alice.def)
+ assert.Equal(t, true, val.Alice.Get())
+ assert.Nil(t, val.Alice.val)
+
+ assert.Equal(t, 20*time.Second, val.Dave.def)
+ assert.Equal(t, 20*time.Second, val.Dave.Get())
+ assert.Nil(t, val.Dave.val)
+
+ assert.Equal(t, "done", val.Status.def)
+ assert.Equal(t, "done", val.Status.Get())
+ assert.Nil(t, val.Status.val)
+
+ assert.Equal(t, []string{"one", "two", "three"}, val.Slice.def)
+ assert.Equal(t, []string{"one", "two", "three"}, val.Slice.Get())
+ assert.Nil(t, val.Slice.val)
+ })
+}
+
+func TestDynamicValue(t *testing.T) {
+ var (
+ dInt DynamicValue[int]
+ dFloat DynamicValue[float64]
+ dBool DynamicValue[bool]
+ dDuration DynamicValue[time.Duration]
+ dString DynamicValue[string]
+ dSlice DynamicValue[[]string]
+ )
+
+ // invariant: Zero value of any `DynamicValue` is usable and should return correct
+ // underlying `zero-value` as default.
+ assert.Equal(t, int(0), dInt.Get())
+ assert.Equal(t, float64(0), dFloat.Get())
+ assert.Equal(t, false, dBool.Get())
+ assert.Equal(t, time.Duration(0), dDuration.Get())
+ assert.Equal(t, "", dString.Get())
+ assert.Equal(t, []string(nil), dSlice.Get())
+
+ // invariant: `NewDynamicValue` constructor should set custom default and should override
+ // the `zero-value`
+ dInt2 := NewDynamicValue(25)
+ dFloat2 := NewDynamicValue(18.6)
+ dBool2 := NewDynamicValue(true)
+ dDuration2 := NewDynamicValue(4 * time.Second)
+ dString2 := NewDynamicValue("progress")
+ dSlice2 := NewDynamicValue([]string{"one", "two"})
+
+ assert.Equal(t, int(25), dInt2.Get())
+ assert.Equal(t, float64(18.6), dFloat2.Get())
+ assert.Equal(t, true, dBool2.Get())
+ assert.Equal(t, time.Duration(4*time.Second), dDuration2.Get())
+ assert.Equal(t, "progress", dString2.Get())
+ assert.Equal(t, []string{"one", "two"}, dSlice2.Get())
+
+ // invariant: After setting dynamic default via `SetValue`, this value should
+ // override both `zero-value` and `custom-default`.
+ dInt.SetValue(30)
+ dFloat.SetValue(22.7)
+ dBool.SetValue(false)
+ dDuration.SetValue(10 * time.Second)
+ dString.SetValue("backlog")
+ dSlice.SetValue([]string{})
+
+ assert.Equal(t, int(30), dInt.Get())
+ assert.Equal(t, float64(22.7), dFloat.Get())
+ assert.Equal(t, false, dBool.Get())
+ assert.Equal(t, time.Duration(10*time.Second), dDuration.Get())
+ assert.Equal(t, "backlog", dString.Get())
+ assert.Equal(t, []string{}, dSlice.Get())
+
+ // invariant: Zero value pointer type should return correct zero value with `Get()` without panic
+ var (
+ zeroInt *DynamicValue[int]
+ zeroFloat *DynamicValue[float64]
+ zeroBool *DynamicValue[bool]
+ zeroDur *DynamicValue[time.Duration]
+ zeroString *DynamicValue[string]
+ zeroSlice *DynamicValue[[]string]
+ )
+
+ assert.Equal(t, 0, zeroInt.Get())
+ assert.Equal(t, float64(0), zeroFloat.Get())
+ assert.Equal(t, false, zeroBool.Get())
+ assert.Equal(t, time.Duration(0), zeroDur.Get())
+ assert.Equal(t, "", zeroString.Get())
+ assert.Equal(t, []string(nil), zeroSlice.Get())
+}
+
+func TestDyanamicValue_Reset(t *testing.T) {
+ foo := NewDynamicValue[int](8)
+ bar := NewDynamicValue[float64](8.9)
+ alice := NewDynamicValue[bool](true)
+ dave := NewDynamicValue[time.Duration](3 * time.Second)
+ status := NewDynamicValue[string]("hello")
+ slice := NewDynamicValue[[]string]([]string{"a", "b", "c"})
+
+ assert.Nil(t, foo.val)
+ foo.SetValue(10)
+ assert.Equal(t, 10, *foo.val)
+ foo.Reset() // reset should only reset val. not default
+ assert.Nil(t, foo.val)
+
+ assert.Nil(t, bar.val)
+ bar.SetValue(9.0)
+ assert.Equal(t, 9.0, *bar.val)
+ bar.Reset() // reset should only reset val. not default
+ assert.Nil(t, bar.val)
+
+ assert.Nil(t, alice.val)
+ alice.SetValue(true)
+ assert.Equal(t, true, *alice.val)
+ alice.Reset() // reset should only reset val. not default
+ assert.Nil(t, alice.val)
+
+ assert.Nil(t, dave.val)
+ dave.SetValue(5 * time.Second)
+ assert.Equal(t, 5*time.Second, *dave.val)
+ dave.Reset() // reset should only reset val. not default
+ assert.Nil(t, dave.val)
+
+ assert.Nil(t, status.val)
+ status.SetValue("world")
+ assert.Equal(t, "world", *status.val)
+ status.Reset() // reset should only reset val. not default
+ assert.Nil(t, status.val)
+
+ assert.Nil(t, slice.val)
+ slice.SetValue([]string{"a", "b"})
+ assert.Equal(t, []string{"a", "b"}, *slice.val)
+ slice.Reset() // reset should only reset val. not default
+ assert.Nil(t, slice.val)
+}
+
+func Test_String(t *testing.T) {
+ s := time.Second * 2
+
+ zeroInt := NewDynamicValue(89)
+ zeroFloat := NewDynamicValue(7.8)
+ zeroBool := NewDynamicValue(true)
+ zeroDur := NewDynamicValue(s)
+ zeroString := NewDynamicValue("weaviate")
+
+ assert.Equal(t, "89", fmt.Sprintf("%v", zeroInt))
+ assert.Equal(t, "7.8", fmt.Sprintf("%v", zeroFloat))
+ assert.Equal(t, "true", fmt.Sprintf("%v", zeroBool))
+ assert.Equal(t, s.String(), fmt.Sprintf("%v", zeroDur))
+ assert.Equal(t, "weaviate", fmt.Sprintf("%v", zeroString))
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f21235a4651eacc0a43f397cf6b4ec1c458a5b0
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate.go
@@ -0,0 +1,79 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
import (
	"context"
	"errors"
	"fmt"
	"sort"

	"github.com/sirupsen/logrus"
	"github.com/tailor-inc/graphql"
	"github.com/tailor-inc/graphql/language/ast"

	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/entities/modulecapabilities"
	"github.com/weaviate/weaviate/entities/moduletools"
	"github.com/weaviate/weaviate/entities/search"
)
+
// maximumNumberOfGoroutines caps the concurrency a GenerateProvider uses;
// copied into each instance by NewGeneric.
const maximumNumberOfGoroutines = 10

// GenerateProvider implements the `generate` additional property: it parses
// the graphql arguments, builds the graphql field, and runs generative
// (RAG) requests against the selected provider module.
type GenerateProvider struct {
	// additionalGenerativeParameters maps a provider name to its
	// module-specific request/response parameter hooks.
	additionalGenerativeParameters map[string]modulecapabilities.GenerativeProperty
	// defaultProviderName is used when a query selects no provider explicitly.
	defaultProviderName            string
	maximumNumberOfGoroutines      int
	logger                         logrus.FieldLogger
}
+
+func NewGeneric(
+ additionalGenerativeParameters map[string]modulecapabilities.GenerativeProperty,
+ defaultProviderName string,
+ logger logrus.FieldLogger,
+) *GenerateProvider {
+ return &GenerateProvider{
+ additionalGenerativeParameters: additionalGenerativeParameters,
+ defaultProviderName: defaultProviderName,
+ maximumNumberOfGoroutines: maximumNumberOfGoroutines,
+ logger: logger,
+ }
+}
+
+func (p *GenerateProvider) AdditionalPropertyDefaultValue() interface{} {
+ return &Params{}
+}
+
// ExtractAdditionalFn parses the graphql `generate` arguments into a *Params
// (returned as interface{} to satisfy the additional-property contract).
func (p *GenerateProvider) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} {
	return p.parseGenerateArguments(param, class)
}
+
// AdditionalFieldFn builds the graphql field definition of the `generate`
// additional property for the given class name.
func (p *GenerateProvider) AdditionalFieldFn(classname string) *graphql.Field {
	return p.additionalGenerateField(classname)
}
+
+func (p *GenerateProvider) AdditionalPropertyFn(ctx context.Context,
+ in []search.Result, params interface{}, limit *int,
+ argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig,
+) ([]search.Result, error) {
+ if parameters, ok := params.(*Params); ok {
+ if len(parameters.Options) > 1 {
+ var providerNames []string
+ for name := range parameters.Options {
+ providerNames = append(providerNames, name)
+ }
+ return nil, fmt.Errorf("multiple providers selected: %v, please choose only one", providerNames)
+ }
+ return p.generateResult(ctx, in, parameters, limit, argumentModuleParams, cfg)
+ }
+ return nil, errors.New("wrong parameters")
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field.go
new file mode 100644
index 0000000000000000000000000000000000000000..4540f4f20258263085fdb4c0dcb3962eae08a173
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field.go
@@ -0,0 +1,111 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
+import (
+ "fmt"
+
+ "github.com/tailor-inc/graphql"
+)
+
+func (p *GenerateProvider) additionalGenerateField(className string) *graphql.Field {
+ generate := &graphql.Field{
+ Args: graphql.FieldConfigArgument{
+ "singleResult": &graphql.ArgumentConfig{
+ Description: "Results per object",
+ Type: graphql.NewInputObject(graphql.InputObjectConfig{
+ Name: fmt.Sprintf("%sIndividualResultsArg", className),
+ Fields: p.singleResultArguments(className),
+ }),
+ DefaultValue: nil,
+ },
+ "groupedResult": &graphql.ArgumentConfig{
+ Description: "Grouped results of all objects",
+ Type: graphql.NewInputObject(graphql.InputObjectConfig{
+ Name: fmt.Sprintf("%sAllResultsArg", className),
+ Fields: p.groupedResultArguments(className),
+ }),
+ DefaultValue: nil,
+ },
+ },
+ Type: graphql.NewObject(graphql.ObjectConfig{
+ Name: fmt.Sprintf("%sAdditionalGenerate", className),
+ Fields: p.fields(className),
+ }),
+ }
+ return generate
+}
+
+func (p *GenerateProvider) singleResultArguments(className string) graphql.InputObjectConfigFieldMap {
+ argumentFields := graphql.InputObjectConfigFieldMap{
+ "prompt": &graphql.InputObjectFieldConfig{
+ Description: "prompt",
+ Type: graphql.String,
+ },
+ "debug": &graphql.InputObjectFieldConfig{
+ Description: "debug",
+ Type: graphql.Boolean,
+ },
+ }
+ p.inputArguments(argumentFields, fmt.Sprintf("%sSingleResult", className))
+ return argumentFields
+}
+
+func (p *GenerateProvider) groupedResultArguments(className string) graphql.InputObjectConfigFieldMap {
+ argumentFields := graphql.InputObjectConfigFieldMap{
+ "task": &graphql.InputObjectFieldConfig{
+ Description: "task",
+ Type: graphql.String,
+ },
+ "properties": &graphql.InputObjectFieldConfig{
+ Description: "Properties used for the generation",
+ Type: graphql.NewList(graphql.String),
+ DefaultValue: nil,
+ },
+ "debug": &graphql.InputObjectFieldConfig{
+ Description: "debug",
+ Type: graphql.Boolean,
+ },
+ }
+ p.inputArguments(argumentFields, fmt.Sprintf("%sGroupedResult", className))
+ return argumentFields
+}
+
+func (p *GenerateProvider) inputArguments(argumentFields graphql.InputObjectConfigFieldMap, prefix string) {
+ // Dynamic RAG syntax generative module specific request parameters
+ for name, generativeParameters := range p.additionalGenerativeParameters {
+ if generativeParameters.RequestParamsFunction != nil {
+ argumentFields[name] = generativeParameters.RequestParamsFunction(prefix)
+ }
+ }
+}
+
+func (p *GenerateProvider) fields(className string) graphql.Fields {
+ fields := graphql.Fields{
+ "singleResult": &graphql.Field{Type: graphql.String},
+ "groupedResult": &graphql.Field{Type: graphql.String},
+ "error": &graphql.Field{Type: graphql.String},
+ "debug": &graphql.Field{Type: graphql.NewObject(graphql.ObjectConfig{
+ Name: fmt.Sprintf("%sDebugFields", className),
+ Fields: graphql.Fields{
+ "prompt": &graphql.Field{Type: graphql.String},
+ },
+ })},
+ }
+ // Dynamic RAG syntax generative module specific response parameters
+ for name, generativeParameters := range p.additionalGenerativeParameters {
+ if generativeParameters.ResponseParamsFunction != nil {
+ fields[name] = generativeParameters.ResponseParamsFunction(className)
+ }
+ }
+ return fields
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a8f581cdcd3557c07e4ade7dc9a8424a91f838a1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_graphql_field_test.go
@@ -0,0 +1,42 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
+import (
+ "testing"
+)
+
// TestGenerateField is currently a no-op: its entire body is commented out.
// NOTE(review): presumably disabled intentionally — either re-enable the
// assertions below (expected type name "ClassAdditionalGenerate") or delete
// the placeholder; confirm with the module owners.
func TestGenerateField(t *testing.T) {
	//t.Run("should generate generate argument properly", func(t *testing.T) {
	//	// given
	//	answerProvider := &GenerateProvider{}
	//	classname := "Class"
	//
	//	// when
	//	answer := answerProvider.additionalGenerateField(classname)
	//
	//	// then
	//	// the built graphQL field needs to support this structure:
	//	// Type: {
	//	//   generate: {
	//	//     result: "result",
	//	//   }
	//	// }
	//	assert.NotNil(t, answer)
	//	assert.Equal(t, "ClassAdditionalGenerate", answer.Type.Name())
	//	assert.NotNil(t, answer.Type)
	//	answerObject, answerObjectOK := answer.Type.(*graphql.Object)
	//	assert.True(t, answerObjectOK)
	//	assert.Equal(t, 1, len(answerObject.Fields()))
	//	assert.NotNil(t, answerObject.Fields()["result"])
	//})
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5b603d14ae6607ee7a69d6b73b894da532880a9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params.go
@@ -0,0 +1,25 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
// Params carries the parsed arguments of the `generate` additional property.
type Params struct {
	Prompt              *string                // per-object prompt (from `singleResult`)
	Task                *string                // grouped task (from `groupedResult`)
	Properties          []string               // properties fed into the grouped task
	PropertiesToExtract []string               // every property the generation needs fetched
	Debug               bool                   // request debug output
	Options             map[string]interface{} // provider name -> module-specific options
}

// GetPropertiesToExtract reports which object properties must be extracted
// for generation.
func (p Params) GetPropertiesToExtract() []string {
	return p.PropertiesToExtract
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params_extractor.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params_extractor.go
new file mode 100644
index 0000000000000000000000000000000000000000..79368fc92282e2a92cce7885c335e3f277014b2e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_params_extractor.go
@@ -0,0 +1,112 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
+import (
+ "log"
+ "regexp"
+ "strings"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+
+ "github.com/tailor-inc/graphql/language/ast"
+)
+
+var compile, _ = regexp.Compile(`{([\w\s]*?)}`)
+
+func (p *GenerateProvider) parseGenerateArguments(args []*ast.Argument, class *models.Class) *Params {
+ out := &Params{Options: make(map[string]interface{})}
+
+ propertiesToExtract := make([]string, 0)
+
+ for _, arg := range args {
+ switch arg.Name.Value {
+ case "singleResult":
+ obj := arg.Value.(*ast.ObjectValue).Fields
+ for _, field := range obj {
+ switch field.Name.Value {
+ case "prompt":
+ out.Prompt = &field.Value.(*ast.StringValue).Value
+ propertiesToExtract = append(propertiesToExtract, ExtractPropsFromPrompt(out.Prompt)...)
+
+ case "debug":
+ out.Debug = field.Value.(*ast.BooleanValue).Value
+ default:
+ // Dynamic RAG syntax generative module specific parameters
+ if value := p.extractGenerativeParameter(field); value != nil {
+ out.Options[field.Name.Value] = value
+ }
+ }
+ }
+ case "groupedResult":
+ obj := arg.Value.(*ast.ObjectValue).Fields
+ propertiesProvided := false
+ for _, field := range obj {
+ switch field.Name.Value {
+ case "task":
+ out.Task = &field.Value.(*ast.StringValue).Value
+ case "properties":
+ inp := field.Value.GetValue().([]ast.Value)
+ out.Properties = make([]string, len(inp))
+
+ for i, value := range inp {
+ out.Properties[i] = value.(*ast.StringValue).Value
+ }
+ propertiesToExtract = append(propertiesToExtract, out.Properties...)
+ propertiesProvided = true
+ case "debug":
+ out.Debug = field.Value.(*ast.BooleanValue).Value
+ default:
+ // Dynamic RAG syntax generative module specific parameters
+ if value := p.extractGenerativeParameter(field); value != nil {
+ out.Options[field.Name.Value] = value
+ }
+ }
+ }
+ if !propertiesProvided {
+ propertiesToExtract = append(propertiesToExtract, schema.GetPropertyNamesFromClass(class, false)...)
+ }
+
+ default:
+ // ignore what we don't recognize
+ log.Printf("Igonore not recognized value: %v", arg.Name.Value)
+ }
+ }
+
+ out.PropertiesToExtract = propertiesToExtract
+
+ return out
+}
+
+func (p *GenerateProvider) extractGenerativeParameter(field *ast.ObjectField) interface{} {
+ if len(p.additionalGenerativeParameters) > 0 {
+ if generative, ok := p.additionalGenerativeParameters[field.Name.Value]; ok {
+ if extractFn := generative.ExtractRequestParamsFunction; extractFn != nil {
+ return extractFn(field)
+ }
+ }
+ }
+ return nil
+}
+
+func ExtractPropsFromPrompt(prompt *string) []string {
+ propertiesToExtract := make([]string, 0)
+ all := compile.FindAll([]byte(*prompt), -1)
+ for entry := range all {
+ propName := string(all[entry])
+ propName = strings.Trim(propName, "{")
+ propName = strings.Trim(propName, "}")
+ propertiesToExtract = append(propertiesToExtract, propName)
+ }
+ return propertiesToExtract
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_result.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_result.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2a9ddb45cabcac8e5008808fe62ee1b72a99f0b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_result.go
@@ -0,0 +1,270 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+
+ enterrors "github.com/weaviate/weaviate/entities/errors"
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/entities/schema"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/search"
+)
+
// generateResult runs the configured generative client over the given search
// results. A Task produces one grouped answer across all results, while a
// Prompt produces one answer per individual result; both may be requested in
// the same call. Generation outcomes (including per-call failures) are
// attached to the results' AdditionalProperties by the helper methods.
func (p *GenerateProvider) generateResult(ctx context.Context,
	in []search.Result,
	params *Params,
	limit *int,
	argumentModuleParams map[string]interface{},
	cfg moduletools.ClassConfig,
) ([]search.Result, error) {
	// Nothing to generate for an empty result set.
	if len(in) == 0 {
		return in, nil
	}
	prompt := params.Prompt
	task := params.Task
	properties := params.Properties
	debug := params.Debug
	provider, settings := p.getProviderSettings(params)
	client, err := p.getClient(provider)
	if err != nil {
		return nil, err
	}

	var propertyDataTypes map[string]schema.DataType
	if cfg != nil {
		propertyDataTypes = cfg.PropertiesDataTypes() // do once for all results to avoid loops over the schema
	}
	// NOTE(review): when both task and prompt are set, err from the task pass
	// is overwritten by the prompt pass below. Both helpers currently always
	// return a nil error (failures are stored per result instead), so this is
	// harmless today — confirm if that ever changes.
	if task != nil {
		_, err = p.generateForAllSearchResults(ctx, in, *task, properties, client, settings, debug, cfg, propertyDataTypes)
	}
	if prompt != nil {
		_, err = p.generatePerSearchResult(ctx, in, *prompt, client, settings, debug, cfg, propertyDataTypes)
	}

	return in, err
}
+
+func (p *GenerateProvider) getProviderSettings(params *Params) (string, interface{}) {
+ for name, settings := range params.Options {
+ return name, settings
+ }
+ return p.defaultProviderName, nil
+}
+
+func (p *GenerateProvider) getClient(provider string) (modulecapabilities.GenerativeClient, error) {
+ if len(p.additionalGenerativeParameters) > 0 {
+ if generativeParams, ok := p.additionalGenerativeParameters[provider]; ok && generativeParams.Client != nil {
+ return generativeParams.Client, nil
+ }
+ }
+ if provider == "" {
+ if len(p.additionalGenerativeParameters) == 1 {
+ for _, params := range p.additionalGenerativeParameters {
+ return params.Client, nil
+ }
+ }
+ return nil, fmt.Errorf("client not found, empty provider")
+ }
+ return nil, fmt.Errorf("client not found for provider: %s", provider)
+}
+
// generatePerSearchResult asks the client for one generated answer per search
// result, fanning the calls out over goroutines bounded by
// p.maximumNumberOfGoroutines. Each outcome (result or error) is written into
// the corresponding result's AdditionalProperties; the method itself always
// returns a nil error.
func (p *GenerateProvider) generatePerSearchResult(ctx context.Context,
	in []search.Result,
	prompt string,
	client modulecapabilities.GenerativeClient,
	settings interface{},
	debug bool,
	cfg moduletools.ClassConfig,
	propertyDataTypes map[string]schema.DataType,
) ([]search.Result, error) {
	var wg sync.WaitGroup
	// Buffered channel used as a counting semaphore to cap concurrency.
	sem := make(chan struct{}, p.maximumNumberOfGoroutines)
	for i := range in {
		wg.Add(1)
		i := i // per-iteration copy for the closure (pre-Go-1.22 loop semantics)
		enterrors.GoWrapper(func() {
			sem <- struct{}{} // acquire a slot; blocks while the semaphore is full
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			var props *modulecapabilities.GenerateProperties
			if propertyDataTypes != nil {
				props = p.getProperties(in[i], nil, propertyDataTypes)
			}
			generateResult, err := client.GenerateSingleResult(ctx, props, prompt, settings, debug, cfg)
			// Each goroutine writes only to its own index i, so no locking is needed.
			p.setIndividualResult(in, i, generateResult, err)
		}, p.logger)
	}
	wg.Wait()
	return in, nil
}
+
+func (p *GenerateProvider) generateForAllSearchResults(ctx context.Context,
+ in []search.Result,
+ task string,
+ properties []string,
+ client modulecapabilities.GenerativeClient,
+ settings interface{},
+ debug bool,
+ cfg moduletools.ClassConfig,
+ propertyDataTypes map[string]schema.DataType,
+) ([]search.Result, error) {
+ var propertiesForAllDocs []*modulecapabilities.GenerateProperties
+ if propertyDataTypes != nil {
+ for _, res := range in {
+ propertiesForAllDocs = append(propertiesForAllDocs, p.getProperties(res, properties, propertyDataTypes))
+ }
+ }
+ generateResult, err := client.GenerateAllResults(ctx, propertiesForAllDocs, task, settings, debug, cfg)
+ p.setCombinedResult(in, 0, generateResult, err)
+ return in, nil
+}
+
+func (p *GenerateProvider) getProperties(result search.Result,
+ properties []string, propertyDataTypes map[string]schema.DataType,
+) *modulecapabilities.GenerateProperties {
+ textProperties := map[string]string{}
+ blobProperties := map[string]*string{}
+ allProperties := result.Object().Properties.(map[string]interface{})
+ for property, value := range allProperties {
+ if len(properties) > 0 && !p.containsProperty(property, properties) {
+ continue
+ }
+
+ // Nil property is not useful as an input to a generative model.
+ if value == nil {
+ continue
+ }
+
+ if dt, ok := propertyDataTypes[property]; ok {
+ switch dt {
+ case schema.DataTypeTextArray, schema.DataTypeDateArray, schema.DataTypeStringArray,
+ schema.DataTypeNumberArray, schema.DataTypeIntArray, schema.DataTypeBooleanArray,
+ schema.DataTypeUUIDArray:
+ textProperties[property] = p.marshalInput(value)
+ case schema.DataTypeObject, schema.DataTypeObjectArray:
+ textProperties[property] = p.marshalInput(value)
+ case schema.DataTypePhoneNumber, schema.DataTypeGeoCoordinates:
+ textProperties[property] = p.marshalInput(value)
+ case schema.DataTypeCRef:
+ textProperties[property] = p.marshalInput(value)
+ case schema.DataTypeBlob:
+ v := value.(string)
+ blobProperties[property] = &v
+ default:
+ // all primitive types
+ textProperties[property] = fmt.Sprintf("%v", value)
+ }
+ }
+ }
+ return &modulecapabilities.GenerateProperties{
+ Text: textProperties,
+ Blob: blobProperties,
+ }
+}
+
+func (p *GenerateProvider) marshalInput(in any) string {
+ if val, err := json.Marshal(in); err == nil {
+ return string(val)
+ }
+ return fmt.Sprintf("%v", in)
+}
+
+func (p *GenerateProvider) setCombinedResult(in []search.Result, i int,
+ generateResult *modulecapabilities.GenerateResponse, err error,
+) {
+ ap := in[i].AdditionalProperties
+ if ap == nil {
+ ap = models.AdditionalProperties{}
+ }
+
+ var result *string
+ var params map[string]interface{}
+ var debug *modulecapabilities.GenerateDebugInformation
+ if generateResult != nil {
+ result = generateResult.Result
+ params = generateResult.Params
+ debug = generateResult.Debug
+ }
+
+ generate := map[string]interface{}{
+ "groupedResult": result,
+ "error": err,
+ "debug": debug,
+ }
+
+ for k, v := range params {
+ generate[k] = v
+ }
+
+ ap["generate"] = generate
+
+ in[i].AdditionalProperties = ap
+}
+
+func (p *GenerateProvider) setIndividualResult(in []search.Result, i int,
+ generateResult *modulecapabilities.GenerateResponse, err error,
+) {
+ var result *string
+ var params map[string]interface{}
+ var debug *modulecapabilities.GenerateDebugInformation
+ if generateResult != nil {
+ result = generateResult.Result
+ params = generateResult.Params
+ debug = generateResult.Debug
+ }
+
+ ap := in[i].AdditionalProperties
+ if ap == nil {
+ ap = models.AdditionalProperties{}
+ }
+
+ // pulls out the error from the task rather than cloberring it below
+ if g := ap["generate"]; g != nil {
+ if e := g.(map[string]interface{})["error"]; e != nil {
+ err = e.(error)
+ }
+ }
+
+ generate := map[string]interface{}{
+ "singleResult": result,
+ "error": err,
+ "debug": debug,
+ }
+
+ for k, v := range params {
+ generate[k] = v
+ }
+
+ if ap["generate"] != nil {
+ generate["groupedResult"] = ap["generate"].(map[string]interface{})["groupedResult"]
+ }
+
+ ap["generate"] = generate
+
+ in[i].AdditionalProperties = ap
+}
+
+func (p *GenerateProvider) containsProperty(property string, properties []string) bool {
+ for i := range properties {
+ if properties[i] == property {
+ return true
+ }
+ }
+ return false
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..85d06e6dc112e90c24b68631407c25ef459335ac
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/generate/generate_test.go
@@ -0,0 +1,304 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generate
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/sirupsen/logrus/hooks/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/entities/search"
+)
+
+func TestAdditionalAnswerProvider(t *testing.T) {
+ t.Run("should answer", func(t *testing.T) {
+ // given
+ logger, _ := test.NewNullLogger()
+ client := &fakeClient{}
+ defaultProviderName := "openai"
+ additionalGenerativeParameters := map[string]modulecapabilities.GenerativeProperty{
+ defaultProviderName: {Client: client},
+ }
+ answerProvider := NewGeneric(additionalGenerativeParameters, defaultProviderName, logger)
+ in := []search.Result{
+ {
+ ID: "some-uuid",
+ Schema: map[string]interface{}{
+ "content": "content",
+ },
+ },
+ }
+ s := "this is a task"
+ fakeParams := &Params{
+ Task: &s,
+ }
+ limit := 1
+ argumentModuleParams := map[string]interface{}{}
+
+ // when
+ out, err := answerProvider.AdditionalPropertyFn(context.Background(), in, fakeParams, &limit, argumentModuleParams, nil)
+
+ // then
+ require.Nil(t, err)
+ require.NotEmpty(t, out)
+ assert.Equal(t, 1, len(in))
+ answer, answerOK := in[0].AdditionalProperties["generate"]
+ assert.True(t, answerOK)
+ assert.NotNil(t, answer)
+ answerAdditional, answerAdditionalOK := answer.(map[string]interface{})
+ assert.True(t, answerAdditionalOK)
+ groupedResult, ok := answerAdditional["groupedResult"].(*string)
+ assert.True(t, ok)
+ assert.Equal(t, "this is a task", *groupedResult)
+ })
+}
+
// fakeClient is a stub generative client that simply echoes the task or
// prompt string back as the generated result.
type fakeClient struct{}

// GenerateAllResults echoes the task string as the grouped result.
func (c *fakeClient) GenerateAllResults(ctx context.Context, properties []*modulecapabilities.GenerateProperties, task string, settings interface{}, debug bool, cfg moduletools.ClassConfig) (*modulecapabilities.GenerateResponse, error) {
	return c.getResults(task), nil
}

// GenerateSingleResult echoes the prompt string as the per-object result.
func (c *fakeClient) GenerateSingleResult(ctx context.Context, properties *modulecapabilities.GenerateProperties, prompt string, settings interface{}, debug bool, cfg moduletools.ClassConfig) (*modulecapabilities.GenerateResponse, error) {
	return c.getResult(prompt), nil
}

// getResults wraps the given string in a GenerateResponse.
func (c *fakeClient) getResults(task string) *modulecapabilities.GenerateResponse {
	return &modulecapabilities.GenerateResponse{
		Result: &task,
	}
}

// getResult wraps the given string in a GenerateResponse.
func (c *fakeClient) getResult(task string) *modulecapabilities.GenerateResponse {
	return &modulecapabilities.GenerateResponse{
		Result: &task,
	}
}
+
+func Test_getProperties(t *testing.T) {
+ var provider GenerateProvider
+
+ for _, tt := range []struct {
+ missing any
+ dataType schema.DataType
+ }{
+ {nil, schema.DataTypeBlob},
+ {[]string{}, schema.DataTypeTextArray},
+ {nil, schema.DataTypeTextArray},
+ } {
+ t.Run(fmt.Sprintf("%s=%v", tt.dataType, tt.missing), func(t *testing.T) {
+ result := search.Result{
+ Schema: models.PropertySchema(map[string]any{
+ "missing": tt.missing,
+ }),
+ }
+
+ // Get provider to iterate over a result object with a nil property.
+ require.NotPanics(t, func() {
+ provider.getProperties(result, []string{"missing"},
+ map[string]schema.DataType{"missing": tt.dataType})
+ })
+ })
+ }
+}
+
// Test_getProperties_parseValues is a table test covering the text/blob
// conversion of every supported data-type family: primitives, arrays,
// (nested) objects, and phone-number / geo-coordinate / cross-reference
// values. For each case, the properties filter is built from the expected
// outputs so all listed properties are included.
func Test_getProperties_parseValues(t *testing.T) {
	// Base64-encoded PNG used as the blob fixture; blobs must be passed
	// through unchanged.
	blobValue := "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAAA/CAYAAAAfQM0aAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyRpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoTWFjaW50b3NoKSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDpCRjQ5NEM3RDI5QTkxMUUyOTc1NENCMzI4N0QwNDNCOSIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDpCRjQ5NEM3RTI5QTkxMUUyOTc1NENCMzI4N0QwNDNCOSI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOkJGNDk0QzdCMjlBOTExRTI5NzU0Q0IzMjg3RDA0M0I5IiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOkJGNDk0QzdDMjlBOTExRTI5NzU0Q0IzMjg3RDA0M0I5Ii8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+WeGRxAAAB2hJREFUeNrUXFtslUUQhp6enp6eXlBKr9DaWgqEgjRAUUQFBTWCeHnxQYsPtJj4gkYDBd984AUxxhdiD00pMTHqgxcMYhQJLcXig94ao4G2xiZCE20ptVV6efDM35mTn+X8Z2fv25xO8iWknP13dr9vd2d3Z6aurq5OpZClgEcBpYBNgFsBK/H/xgF/An4DdAC+B4ylKtDQ0JCyTF1dnUhX6MyVK5q5pWVnMn7ZDF9YgFcB2wHpSco04Z8iwPOAS4BvAO/Tny3xCYvfCWgB7AP8lUyoRT5mzxH+K87MQ2USkR4CXAHUpxApmS0iH/hrvaPBF/H9HrguB+wHnAI8lUqAR3BK3mKwAfo6gt9ti3zCiu8vAceBylQCbAF8Aiiy0HkRfbbY5c8lfuGGhVnwBVxpGlNNdSdld3Bt82Tb1NTEvb5q0T+HP1wUfgT6AQ+Kq6CdOpyV+rEI3rF5K8zOzlbTp09X06ZNUxkZGWpsbEwNDQ2pwcFBNTqqe5JdiwsQhAdFgO9m3cGvrKxUpaWlatasWSo3N1fl5eWpSCQyJcLw8LC6fPmy6uvrU52dnerChQtqYmJCdfAVpn9BgHXcgS8vL1ezZ89WZWVlqqKiQhUUFEwSn8qGh4dVT0+Pam9vVxcvXlRdXV1TomDqIjyo2o0LsICWhpSzvqqqSi1cuFDNnz9f5eTkaLlZmdiM72LNEDz7zTffnPzVtwsXLqh0WuWh0eNY39aWDvqIhKhyRnIYmJiRmrlz52rSktHjx49Vps2b1ezZsdaPj4+r06dNq/fr1sdmtCvrQaD4cTI9Af7Je94ngbh7lt99+uzpw4ICaM2eOMunxsHjxYrVnzx61YsUKZbhWIY9iAmhzeHAmk5ubq3bu3KnuvfdeZdPQqXvhhRfU0qVLTX52LfIpJIB4+D7WqiUnJyf10EMPqRkzZigXhr5l27ZtqqSkRPXTG4lPLgH0QOUEP23aNLVp0yY1b948lcQLFy5U999/v5o+fbrJz94oQn5QgGgj9uwXkF27dq1asmSJCsJwT1i5cqUy3BOikB8UYCVpicuXL1erV69WQVlmZqbasGGDmj9/vtInsB8UYK3IUjd37lxXDjelFRYWqjVr1pgsfXHOkgJgeHAFzwnJz89XDzzwgMrKylJB29KlS9WyZcuUgSFXZcECrBAdyI0bN6rS0lIVBsMbV5wJpaWlSneTArALuogj2Mp58+apMBkeS1esWKEMLCpA0oIraA7Mnj1b3X333SpMhk4ZZ2NxcbGKYzZfgAUckWe8ahBdlQGG4d154403qoGfUYCVqb6FUw1vvWG0G264Qd12223KwFamJQiSKTKcokj+TTfdpMJohYWFqrKyUhnYmjSRoAqez9u2Su+BBx98MG6uY8eOaX3uo48+UpMJbhsLCtygO6OVeMVWQZfff/999fDDD8etB/Y3gOFuZvbmA88cFN3NZlU18eeDm2++Wd1xxx2e58OtoM6QyfExE+cvsmmwZWRk1COPPBLXEMqaobcS9FimSbpmIoF/OnPmjPYgcfunu+66y7N9sdlvUCDXhZsIyDtVfNhPZ2dnqxNPPKHuu+++uAfWtZdeeklr1jGsaoaYNr3H5ZlPB3WhRvjtdc369evV0aNH40jPVVdd5a19s2zZMvXYY4+pu+++W5m8WZOC2mnpCV7Y/FRfX69eeeUVdeLECXXy5Elv+cTkUBoI7sgbN25UTzzxhLrllluUC6tGZKXFUJYPr1plW/DgwYNq3bp1avXq1eqdd95Rn376qTp69Kjq7e31lqlZs2Z5d4gHHnhArVq1ylvCHJo4+cHGGSoFdWzO1ieffFIdOXJE9fT0qPvuu0+tXbtWVVdXq3vuuceV4GKyKITgKGggJnFY0+HkDXbOBg9x2PAS5pe1/9QRvXgw/EBlzG4xJpF/rFBOXsLhX6ZZQ5ZZOBZ5/w/Hfipk3cqoJ4vRRZqxi5t81x5zIuHFC0+lsx8+U7bc8pfDsUSjTGBOWkPm/Stf4uV/TtmRjH2peWjE/jmFhCjJQW7ZvCR0zGbVrDtApnLpMwC2MuC3A3lZ6v8VfT1wcAi8NVvlHnsgk8EGdrRosfm2g/qiM2w4YYH1gh14FZOIu8z5mXl/QlXCS+dSZIfFi0IH+8AiW+hd1ghCrX3rOLA0D/kIvrcBH3jpEEdZNKbpMoa6lMhKHRDJjWsd7BmhbhfmqXGweM9O5BewxfS8LCq8Ihpc4UYzaD6NFeJQYvy9NEe+zHxRJCiZ2L+OP9JbsiTOUSnOCmMg9xc98adZGf7hdZIXZhPejSyMZBX2xIiYdfW4SHjfGx9A9//X/tTKP8bwiH2XWKu9Nw7W4/QMdefcjo7JA7FwzzD+azxfZfijnnvMLDI0EsaqEOv8xlSK0vTyQ+r4Wo9NK0RiKpVp0OcOc03kXe7YnRLzzWWTF06PLbZT2gn6qohcSRGC79zbUyVLhFWxVUSNm2OmFDb58QPJgtFOhGcuOceOP0nCZaKbTJHNpFNFFOKNMQbkaOYzwvrKLxhwPxqHK5P9IBunU0R57UIC6qkc1U+aPJhL1nHM3CYzSaqSbdU9HHpi3QUuwRCxoYQAAZ8N8N9Ck+wDqQDnJPP76fNd7Pw+jaLyiXEUaNf3KAPJF6YNmo1uikMamvMh1+H7Vwd3gy26Eo5Aeeb/WMusRN8fPE1CdUrycgVFTPE2xhDauz7xXgd3pafXZJLZYqCu+crEedNqrIGzg6BcyzpBbtwCWGBiG3GNsr9FCS3RnAtGDCQs8irADtdaE9rAkpvCjh6LbW5EL55SizG5vQ1D3BBDX5Mm6WnRKxxOJLA/1GdGVJPnrPPdIdctudVCPDYwPT8dm3IMdsoYe5iYE+H0Q/fD7a0x7MCLWJNoAAAAASUVORK5CYII="
	latitude, longitude := float32(1.1), float32(2.2)
	for _, tt := range []struct {
		name                   string
		result                 search.Result
		propertyDataTypes      map[string]schema.DataType
		expectedTextProperties map[string]string
		expectedBlobProperties map[string]*string
	}{
		{
			name: "primitive types",
			result: search.Result{
				Schema: models.PropertySchema(map[string]any{
					"text":   "text",
					"string": "string",
					"date":   "date",
					"number": float64(0.01),
					"int":    int(100),
					"bool":   true,
					"uuid":   "bd512e32-3802-44b1-8d73-65fa8f6e9b59",
					"blob":   blobValue,
				}),
			},
			propertyDataTypes: map[string]schema.DataType{
				"text":   schema.DataTypeText,
				"string": schema.DataTypeString,
				"date":   schema.DataTypeDate,
				"number": schema.DataTypeNumber,
				"int":    schema.DataTypeInt,
				"bool":   schema.DataTypeBoolean,
				"uuid":   schema.DataTypeUUID,
				"blob":   schema.DataTypeBlob,
			},
			expectedTextProperties: map[string]string{
				"text":   "text",
				"string": "string",
				"date":   "date",
				"number": "0.01",
				"int":    "100",
				"bool":   "true",
				"uuid":   "bd512e32-3802-44b1-8d73-65fa8f6e9b59",
			},
			expectedBlobProperties: map[string]*string{
				"blob": &blobValue,
			},
		},
		{
			name: "array types",
			result: search.Result{
				Schema: models.PropertySchema(map[string]any{
					"text[]":   []string{"a", "b", "c"},
					"string[]": []string{"aa", "bb"},
					"date[]":   []string{"2025-07-12", "2025-07-18"},
					"number[]": []float64{22.01, 33.0},
					"int[]":    []any{1, 2, 3, 4},
					"bool[]":   []bool{true, false, true},
					"uuid[]":   []any{"bd512e32-3802-44b1-8d73-65fa8f6e9b58", "bd512e32-3802-44b1-8d73-65fa8f6e9b59"},
				}),
			},
			propertyDataTypes: map[string]schema.DataType{
				"text[]":   schema.DataTypeTextArray,
				"string[]": schema.DataTypeStringArray,
				"date[]":   schema.DataTypeDateArray,
				"number[]": schema.DataTypeNumberArray,
				"int[]":    schema.DataTypeIntArray,
				"bool[]":   schema.DataTypeBooleanArray,
				"uuid[]":   schema.DataTypeUUIDArray,
			},
			// Arrays are JSON-marshalled into compact strings.
			expectedTextProperties: map[string]string{
				"text[]":   `["a","b","c"]`,
				"string[]": `["aa","bb"]`,
				"date[]":   `["2025-07-12","2025-07-18"]`,
				"number[]": `[22.01,33]`,
				"int[]":    `[1,2,3,4]`,
				"bool[]":   `[true,false,true]`,
				"uuid[]":   `["bd512e32-3802-44b1-8d73-65fa8f6e9b58","bd512e32-3802-44b1-8d73-65fa8f6e9b59"]`,
			},
			expectedBlobProperties: nil,
		},
		{
			name: "object types",
			result: search.Result{
				Schema: models.PropertySchema(map[string]any{
					"object": map[string]any{
						"nestedProp": map[string]any{
							"a": int64(1),
							"b_nested": map[string]any{
								"b": int64(2),
								"c_nested": map[string]any{
									"c":      int64(3),
									"nested": map[string]any{},
								},
							},
						},
					},
					"object[]": []any{
						map[string]any{
							"nestedProp": map[string]any{
								"a": int64(1),
								"b_nested": map[string]any{
									"b": int64(2),
									"c_nested": map[string]any{
										"c":      int64(3),
										"nested": map[string]any{},
									},
								},
							},
						},
						map[string]any{
							"book": map[string]any{
								"author": "Frank Herbert",
								"title":  "Dune",
							},
						},
					},
				}),
			},
			propertyDataTypes: map[string]schema.DataType{
				"object":   schema.DataTypeObject,
				"object[]": schema.DataTypeObjectArray,
			},
			// Nested objects serialize depth-first with sorted keys (JSON map order).
			expectedTextProperties: map[string]string{
				"object":   `{"nestedProp":{"a":1,"b_nested":{"b":2,"c_nested":{"c":3,"nested":{}}}}}`,
				"object[]": `[{"nestedProp":{"a":1,"b_nested":{"b":2,"c_nested":{"c":3,"nested":{}}}}},{"book":{"author":"Frank Herbert","title":"Dune"}}]`,
			},
			expectedBlobProperties: nil,
		},
		{
			name: "phone number and geo and cref",
			result: search.Result{
				Schema: models.PropertySchema(map[string]any{
					"phoneNumber": &models.PhoneNumber{CountryCode: 49, Input: "500600700"},
					"phoneNumberMap": map[string]interface{}{
						"national": 0o1711234567,
					},
					"geo": &models.GeoCoordinates{Latitude: &latitude, Longitude: &longitude},
					"geoMap": map[string]interface{}{
						"latitude":  5.00005,
						"longitude": 4.00004,
					},
					"cref": []string{"AnotherClass"},
				}),
			},
			propertyDataTypes: map[string]schema.DataType{
				"phoneNumber":    schema.DataTypePhoneNumber,
				"phoneNumberMap": schema.DataTypePhoneNumber,
				"geo":            schema.DataTypeGeoCoordinates,
				"geoMap":         schema.DataTypeGeoCoordinates,
				"cref":           schema.DataTypeCRef,
			},
			expectedTextProperties: map[string]string{
				"phoneNumber":    `{"countryCode":49,"input":"500600700"}`,
				"phoneNumberMap": `{"national":254097783}`,
				"geo":            `{"latitude":1.1,"longitude":2.2}`,
				"geoMap":         `{"latitude":5.00005,"longitude":4.00004}`,
				"cref":           `["AnotherClass"]`,
			},
			expectedBlobProperties: nil,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Build the property filter from the union of expected text and
			// blob keys so every fixture property is selected.
			properties := []string{}
			for prop := range tt.expectedTextProperties {
				properties = append(properties, prop)
			}
			for prop := range tt.expectedBlobProperties {
				properties = append(properties, prop)
			}
			provider := &GenerateProvider{}
			res := provider.getProperties(tt.result, properties, tt.propertyDataTypes)
			require.NotNil(t, res)
			assert.Len(t, res.Text, len(tt.expectedTextProperties))
			for prop, val := range tt.expectedTextProperties {
				assert.Equal(t, val, res.Text[prop])
			}
			assert.Len(t, res.Blob, len(tt.expectedBlobProperties))
			for prop, val := range tt.expectedBlobProperties {
				assert.Equal(t, val, res.Blob[prop])
			}
		})
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/generative.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/generative.go
new file mode 100644
index 0000000000000000000000000000000000000000..c52ce73d5f9c583425e067640ceb473f26953c6a
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/generative.go
@@ -0,0 +1,20 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package models
+
// GenerateResult represents the outcome of a generative-module call:
// SingleResult holds a per-object answer, GroupedResult holds one answer
// computed over a whole result set, and Error carries any failure.
// NOTE(review): the error interface has no JSON representation of its own,
// so the Error field may not serialize meaningfully — confirm the intended
// wire format.
type GenerateResult struct {
	SingleResult  *string `json:"singleResult,omitempty"`
	GroupedResult *string `json:"groupedResult,omitempty"`
	Error         error   `json:"error,omitempty"`
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/ranker.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/ranker.go
new file mode 100644
index 0000000000000000000000000000000000000000..b26eb40dc3e84927ddedd3c512cf919a0e406b33
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/models/ranker.go
@@ -0,0 +1,18 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package models
+
// RankResult represents the relevance score assigned to a result by a
// reranker module. (The previous comment referenced the qna module's Answer
// type, which appears to have been a copy-paste leftover.)
type RankResult struct {
	Score *float64 `json:"score,omitempty"`
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector.go
new file mode 100644
index 0000000000000000000000000000000000000000..3817e2f0e93eb4a7c7d315bf7911b5be4c3f5c00
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector.go
@@ -0,0 +1,127 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/danaugrs/go-tsne/tsne"
+ "github.com/pkg/errors"
+ "github.com/tailor-inc/graphql/language/ast"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/search"
+ "gonum.org/v1/gonum/mat"
+)
+
// FeatureProjection is the additional-property payload returned to clients:
// the reduced-dimensionality vector computed for one search result.
type FeatureProjection struct {
	Vector []float32 `json:"vector"`
}

// New constructs a FeatureProjector, seeding fixedSeed with the current
// wall-clock time.
func New() *FeatureProjector {
	return &FeatureProjector{
		fixedSeed: time.Now().UnixNano(),
	}
}

// FeatureProjector reduces result vectors to a small number of dimensions
// (via t-SNE) for visualization.
// NOTE(review): fixedSeed is assigned in New but not referenced by the
// visible code — confirm whether it is still needed.
type FeatureProjector struct {
	fixedSeed int64
}
+
// AdditionalPropertyDefaultValue returns the zero-value parameters used when
// the featureProjection property is requested without explicit arguments.
func (f *FeatureProjector) AdditionalPropertyDefaultValue() interface{} {
	return &Params{}
}
+
+func (f *FeatureProjector) AdditionalPropertyFn(ctx context.Context,
+ in []search.Result, params interface{}, limit *int,
+ argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig,
+) ([]search.Result, error) {
+ if parameters, ok := params.(*Params); ok {
+ return f.Reduce(in, parameters)
+ }
+ return nil, errors.New("unknown params")
+}
+
// ExtractAdditionalFn parses the GraphQL arguments of the featureProjection
// field into projection parameters (delegated to
// parseFeatureProjectionArguments).
func (f *FeatureProjector) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} {
	return parseFeatureProjectionArguments(param)
}
+
// Reduce projects the result vectors down to params.Dimensions dimensions
// using t-SNE and stores the projected vector on each result under the
// "featureProjection" additional property. An empty input returns (nil, nil);
// results carrying only named vectors (Vectors) without a legacy Vector are
// rejected.
func (f *FeatureProjector) Reduce(in []search.Result, params *Params) ([]search.Result, error) {
	if len(in) == 0 {
		return nil, nil
	}

	if params == nil {
		return nil, fmt.Errorf("no params provided")
	}

	if len(in[0].Vector) == 0 && len(in[0].Vectors) > 0 {
		return nil, fmt.Errorf("feature projection doesn't work for multiple vectors")
	}

	// All vectors must share the dimensionality of the first one; this is
	// enforced in vectorsToMatrix.
	dims := len(in[0].Vector)

	if err := params.SetDefaultsAndValidate(len(in), dims); err != nil {
		return nil, errors.Wrap(err, "invalid params")
	}

	matrix, err := f.vectorsToMatrix(in, dims, params)
	if err != nil {
		return nil, err
	}
	t := tsne.NewTSNE(*params.Dimensions, float64(*params.Perplexity),
		float64(*params.LearningRate), *params.Iterations, false)
	t.EmbedData(matrix, nil)
	rows, cols := t.Y.Dims()
	// Sanity check: t-SNE must yield exactly one embedded row per input.
	if rows != len(in) {
		return nil, fmt.Errorf("incorrect matrix dimensions after t-SNE len %d != %d", len(in), rows)
	}

	for i := 0; i < rows; i++ {
		// Convert the embedded row back to float32 for the API payload.
		vector := make([]float32, cols)
		for j := range vector {
			vector[j] = float32(t.Y.At(i, j))
		}
		up := in[i].AdditionalProperties
		if up == nil {
			up = models.AdditionalProperties{}
		}

		up["featureProjection"] = &FeatureProjection{
			Vector: vector,
		}

		in[i].AdditionalProperties = up
	}

	return in, nil
}
+
+func (f *FeatureProjector) vectorsToMatrix(in []search.Result, dims int, params *Params) (*mat.Dense, error) {
+ items := len(in)
+
+ // concat all vectors to build gonum dense matrix
+ mergedVectors := make([]float64, items*dims)
+ for i, obj := range in {
+ if l := len(obj.Vector); l != dims {
+ return nil, fmt.Errorf("inconsistent vector lengths found: %d and %d", dims, l)
+ }
+
+ for j, dim := range obj.Vector {
+ mergedVectors[i*dims+j] = float64(dim)
+ }
+ }
+
+ return mat.NewDense(len(in), dims, mergedVectors), nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field.go
new file mode 100644
index 0000000000000000000000000000000000000000..9fe693f5842cf9c302f29820a635e2af789c7826
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field.go
@@ -0,0 +1,51 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "fmt"
+
+ "github.com/tailor-inc/graphql"
+)
+
+func (f *FeatureProjector) AdditionalFeatureProjectionField(classname string) *graphql.Field {
+ return &graphql.Field{
+ Args: graphql.FieldConfigArgument{
+ "algorithm": &graphql.ArgumentConfig{
+ Type: graphql.String,
+ DefaultValue: nil,
+ },
+ "dimensions": &graphql.ArgumentConfig{
+ Type: graphql.Int,
+ DefaultValue: nil,
+ },
+ "learningRate": &graphql.ArgumentConfig{
+ Type: graphql.Int,
+ DefaultValue: nil,
+ },
+ "iterations": &graphql.ArgumentConfig{
+ Type: graphql.Int,
+ DefaultValue: nil,
+ },
+ "perplexity": &graphql.ArgumentConfig{
+ Type: graphql.Int,
+ DefaultValue: nil,
+ },
+ },
+ Type: graphql.NewObject(graphql.ObjectConfig{
+ Name: fmt.Sprintf("%sAdditionalFeatureProjection", classname),
+ Fields: graphql.Fields{
+ "vector": &graphql.Field{Type: graphql.NewList(graphql.Float)},
+ },
+ }),
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..507065812073bf7c600771eb81101d690ca35848
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_graphql_field_test.go
@@ -0,0 +1,56 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/tailor-inc/graphql"
+)
+
+func TestFeatureProjectionField(t *testing.T) {
+ t.Run("should generate featureProjection argument properly", func(t *testing.T) {
+ // given
+ classname := "Class"
+ p := New()
+
+ // when
+ featureProjection := p.AdditionalFeatureProjectionField(classname)
+
+ // then
+ // the built graphQL field needs to support this structure:
+ // Args: {
+ // algorithm: "a",
+ // dimensions: 1,
+ // learningRate: 2,
+ // iterations: 3,
+ // perplexity: 4
+ // }
+ // Type: {
+ // vector: [0, 1]
+ // }
+ assert.NotNil(t, featureProjection)
+ assert.Equal(t, "ClassAdditionalFeatureProjection", featureProjection.Type.Name())
+ assert.NotNil(t, featureProjection.Args)
+ assert.Equal(t, 5, len(featureProjection.Args))
+ assert.NotNil(t, featureProjection.Args["algorithm"])
+ assert.NotNil(t, featureProjection.Args["dimensions"])
+ assert.NotNil(t, featureProjection.Args["learningRate"])
+ assert.NotNil(t, featureProjection.Args["iterations"])
+ assert.NotNil(t, featureProjection.Args["perplexity"])
+ featureProjectionObject, featureProjectionObjectOK := featureProjection.Type.(*graphql.Object)
+ assert.True(t, featureProjectionObjectOK)
+ assert.Equal(t, 1, len(featureProjectionObject.Fields()))
+ assert.NotNil(t, featureProjectionObject.Fields()["vector"])
+ })
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec485a4d82ced86ce018161c6bcaa9b49bc0a63d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params.go
@@ -0,0 +1,90 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import "github.com/weaviate/weaviate/entities/errorcompounder"
+
+// Params holds the configuration of a feature-projection request. The
+// optional fields are pointers so that "unset" can be told apart from the
+// zero value; setDefaults fills them in before validation.
+type Params struct {
+	Enabled          bool
+	Algorithm        *string // optional parameter
+	Dimensions       *int    // optional parameter
+	Perplexity       *int    // optional parameter
+	Iterations       *int    // optional parameter
+	LearningRate     *int    // optional parameter
+	IncludeNeighbors bool
+}
+
+// SetDefaultsAndValidate fills every unset optional field with its default
+// and then validates the resulting configuration against the number of input
+// items and the dimensionality of the source vectors.
+func (p *Params) SetDefaultsAndValidate(inputSize, dims int) error {
+	p.setDefaults(inputSize, dims)
+	err := p.validate(inputSize, dims)
+	return err
+}
+
+// setDefaults assigns a default to each optional field that is still nil.
+// The default perplexity is capped at inputSize-1 so it can pass validation
+// even for very small result sets.
+func (p *Params) setDefaults(inputSize, dims int) {
+	p.Algorithm = p.optionalString(p.Algorithm, "tsne")
+	p.Dimensions = p.optionalInt(p.Dimensions, 2)
+	p.Iterations = p.optionalInt(p.Iterations, 100)
+	p.LearningRate = p.optionalInt(p.LearningRate, 25)
+	defaultPerplexity := p.min(inputSize-1, 5)
+	p.Perplexity = p.optionalInt(p.Perplexity, defaultPerplexity)
+}
+
+// validate checks every parameter against its allowed range. Violations are
+// collected into a single compound error rather than failing fast, so the
+// caller sees all problems at once. The order of the checks determines the
+// order of messages in the resulting error string.
+func (p *Params) validate(inputSize, dims int) error {
+	errs := errorcompounder.New()
+
+	// only t-SNE is supported
+	if *p.Algorithm != "tsne" {
+		errs.Addf("algorithm %s is not supported: must be one of: tsne", *p.Algorithm)
+	}
+	// t-SNE requires perplexity < number of input items
+	if *p.Perplexity >= inputSize {
+		errs.Addf("perplexity must be smaller than amount of items: %d >= %d", *p.Perplexity, inputSize)
+	}
+	if *p.Iterations < 1 {
+		errs.Addf("iterations must be at least 1, got: %d", *p.Iterations)
+	}
+	if *p.LearningRate < 1 {
+		errs.Addf("learningRate must be at least 1, got: %d", *p.LearningRate)
+	}
+	if *p.Dimensions < 1 {
+		errs.Addf("dimensions must be at least 1, got: %d", *p.Dimensions)
+	}
+	// a projection must reduce dimensionality
+	if *p.Dimensions >= dims {
+		errs.Addf("dimensions must be smaller than source dimensions: %d >= %d", *p.Dimensions, dims)
+	}
+
+	return errs.ToError()
+}
+
+// min returns the smaller of a and b.
+func (p *Params) min(a, b int) int {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+// optionalString returns in unchanged when it is set, otherwise a pointer to
+// defaultValue. Pointer receiver for consistency with the other Params
+// methods (mixed receiver kinds trip staticcheck ST1016).
+func (p *Params) optionalString(in *string, defaultValue string) *string {
+	if in == nil {
+		return &defaultValue
+	}
+
+	return in
+}
+
+// optionalInt returns in unchanged when it is set, otherwise a pointer to
+// defaultValue. Pointer receiver for consistency with the other Params
+// methods (mixed receiver kinds trip staticcheck ST1016).
+func (p *Params) optionalInt(in *int, defaultValue int) *int {
+	if in == nil {
+		return &defaultValue
+	}
+
+	return in
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3b607b0127c158dd7ca37453b689e36bf5fb94e
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor.go
@@ -0,0 +1,54 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "strconv"
+
+ "github.com/tailor-inc/graphql/language/ast"
+)
+
+// parseFeatureProjectionArguments maps the GraphQL featureProjection
+// arguments onto a Params with Enabled set. Unknown arguments are ignored.
+//
+// Robustness fix: the original code type-asserted the raw AST value without a
+// comma-ok check (a non-string value, e.g. an ast.IntValue, would panic) and
+// discarded the strconv.Atoi error (an unparsable string silently became 0).
+// Invalid values now leave the field nil so the later defaulting applies.
+func parseFeatureProjectionArguments(args []*ast.Argument) *Params {
+	out := &Params{Enabled: true}
+
+	for _, arg := range args {
+		switch arg.Name.Value {
+		case "dimensions":
+			out.Dimensions = intArgValue(arg)
+		case "iterations":
+			out.Iterations = intArgValue(arg)
+		case "learningRate":
+			out.LearningRate = intArgValue(arg)
+		case "perplexity":
+			out.Perplexity = intArgValue(arg)
+		case "algorithm":
+			if s, ok := arg.Value.GetValue().(string); ok {
+				out.Algorithm = ptString(s)
+			}
+
+		default:
+			// ignore what we don't recognize
+		}
+	}
+
+	return out
+}
+
+// intArgValue converts an argument's raw string value to *int, returning nil
+// (parameter unset) when the value is not a numeric string.
+func intArgValue(arg *ast.Argument) *int {
+	raw, ok := arg.Value.GetValue().(string)
+	if !ok {
+		return nil
+	}
+	asInt, err := strconv.Atoi(raw)
+	if err != nil {
+		return nil
+	}
+	return ptInt(asInt)
+}
+
+// ptString returns a pointer to a copy of the given string.
+func ptString(in string) *string {
+	return &in
+}
+
+// ptInt returns a pointer to a copy of the given int.
+func ptInt(in int) *int {
+	return &in
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2dd298a35f2f9a95d6fae58ea4de4832b6caf670
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_extractor_test.go
@@ -0,0 +1,144 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/tailor-inc/graphql/language/ast"
+)
+
+// Test_parseFeatureProjectionArguments table-drives the GraphQL argument
+// parser: each case feeds a list of ast.Arguments and expects the matching
+// Params, with Enabled always true and set fields as pointers.
+func Test_parseFeatureProjectionArguments(t *testing.T) {
+	type args struct {
+		args []*ast.Argument
+	}
+	tests := []struct {
+		name string
+		args args
+		want *Params
+	}{
+		{
+			name: "Should create with no params",
+			args: args{
+				args: []*ast.Argument{},
+			},
+			want: &Params{
+				Enabled: true,
+			},
+		},
+		{
+			name: "Should create with all params",
+			args: args{
+				args: []*ast.Argument{
+					createArg("algorithm", "tsne"),
+					createArg("dimensions", "3"),
+					createArg("iterations", "100"),
+					createArg("learningRate", "15"),
+					createArg("perplexity", "10"),
+				},
+			},
+			want: &Params{
+				Enabled:      true,
+				Algorithm:    ptString("tsne"),
+				Dimensions:   ptInt(3),
+				Iterations:   ptInt(100),
+				LearningRate: ptInt(15),
+				Perplexity:   ptInt(10),
+			},
+		},
+		{
+			name: "Should create with only algorithm param",
+			args: args{
+				args: []*ast.Argument{
+					createArg("algorithm", "tsne"),
+				},
+			},
+			want: &Params{
+				Enabled:   true,
+				Algorithm: ptString("tsne"),
+			},
+		},
+		{
+			name: "Should create with only dimensions param",
+			args: args{
+				args: []*ast.Argument{
+					createArg("dimensions", "3"),
+				},
+			},
+			want: &Params{
+				Enabled:    true,
+				Dimensions: ptInt(3),
+			},
+		},
+		{
+			name: "Should create with only iterations param",
+			args: args{
+				args: []*ast.Argument{
+					createArg("iterations", "100"),
+				},
+			},
+			want: &Params{
+				Enabled:    true,
+				Iterations: ptInt(100),
+			},
+		},
+		{
+			name: "Should create with only learningRate param",
+			args: args{
+				args: []*ast.Argument{
+					createArg("learningRate", "15"),
+				},
+			},
+			want: &Params{
+				Enabled:      true,
+				LearningRate: ptInt(15),
+			},
+		},
+		{
+			name: "Should create with only perplexity param",
+			args: args{
+				args: []*ast.Argument{
+					createArg("perplexity", "10"),
+				},
+			},
+			want: &Params{
+				Enabled:    true,
+				Perplexity: ptInt(10),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// reflect.DeepEqual follows the pointer fields, so pointed-to
+			// values (not addresses) are compared
+			if got := parseFeatureProjectionArguments(tt.args.args); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("parseFeatureProjectionArguments() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// createArg builds a minimal *ast.Argument carrying a string value, as the
+// GraphQL parser would produce for `name: "value"`.
+func createArg(name string, value string) *ast.Argument {
+	return ast.NewArgument(&ast.Argument{
+		Name: ast.NewName(&ast.Name{Value: name}),
+		Kind: "Kind",
+		Value: ast.NewStringValue(&ast.StringValue{
+			Kind:  "Kind",
+			Value: value,
+		}),
+	})
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6257d7b059131355e28f2a53e5a9ec083f037f5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_params_test.go
@@ -0,0 +1,126 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestParams_validate checks the range validation of the projection
+// parameters, including that multiple violations are all reported in one
+// compound error.
+func TestParams_validate(t *testing.T) {
+	type args struct {
+		inputSize int
+		dims      int
+	}
+	tests := []struct {
+		name        string
+		param       *Params
+		args        args
+		wantErr     bool
+		errContains []string
+	}{
+		{
+			name:  "Should validate properly with default Params",
+			param: generateParamWithDefaultValues(1, 3),
+			args: args{
+				inputSize: 1,
+				dims:      3,
+			},
+			wantErr: false,
+		},
+		{
+			name:  "Should validate properly with default Params with higher inputs",
+			param: generateParamWithDefaultValues(100, 50),
+			args: args{
+				inputSize: 100,
+				dims:      50,
+			},
+			wantErr: false,
+		},
+		{
+			// default Dimensions is 2, which must be strictly smaller than dims
+			name:  "Should not validate - dimensions must be higher then 2",
+			param: generateParamWithDefaultValues(100, 2),
+			args: args{
+				inputSize: 100,
+				dims:      2,
+			},
+			wantErr: true,
+		},
+		{
+			name:  "Should not validate - with dimensions equal to 0",
+			param: generateParamWithValues(true, "tsne", 0, 5, 100, 25, true),
+			args: args{
+				inputSize: 100,
+				dims:      2,
+			},
+			wantErr: true,
+			errContains: []string{
+				"dimensions must be at least 1, got: 0",
+			},
+		},
+		{
+			// every single check fails; all messages must appear in the error
+			name:  "Should not validate - with all wrong values",
+			param: generateParamWithValues(true, "unknown", 5, 5, 0, 0, true),
+			args: args{
+				inputSize: 4,
+				dims:      2,
+			},
+			wantErr: true,
+			errContains: []string{
+				"algorithm unknown is not supported: must be one of: tsne",
+				"perplexity must be smaller than amount of items: 5 >= 4",
+				"iterations must be at least 1, got: 0",
+				"learningRate must be at least 1, got: 0",
+				"dimensions must be smaller than source dimensions: 5 >= 2",
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			p := tt.param
+			err := p.validate(tt.args.inputSize, tt.args.dims)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("Params.validate() error = %v, wantErr %v", err, tt.wantErr)
+			}
+			if tt.wantErr && len(tt.errContains) > 0 {
+				for _, containsString := range tt.errContains {
+					assert.Contains(t, err.Error(), containsString)
+				}
+			}
+		})
+	}
+}
+
+// generateParamWithDefaultValues returns a Params populated exclusively with
+// the defaults derived from the given input size and dimensionality.
+func generateParamWithDefaultValues(inputSize, dims int) *Params {
+	params := &Params{}
+	params.setDefaults(inputSize, dims)
+	return params
+}
+
+// generateParamWithValues returns a fully specified Params; the optional
+// pointer fields take the addresses of the argument copies.
+func generateParamWithValues(
+	enabled bool,
+	algorithm string,
+	dims, perplexity, iterations, learningRate int,
+	includeNeighbors bool,
+) *Params {
+	return &Params{
+		Enabled:          enabled,
+		IncludeNeighbors: includeNeighbors,
+		Algorithm:        &algorithm,
+		Dimensions:       &dims,
+		Perplexity:       &perplexity,
+		Iterations:       &iterations,
+		LearningRate:     &learningRate,
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d3f7b0a2a232aa60898a8e45eceecc5fd89f17f
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/projector/projector_test.go
@@ -0,0 +1,71 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package projector
+
+import (
+ "testing"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/additional"
+ "github.com/weaviate/weaviate/entities/search"
+)
+
+// TestProjector exercises Reduce end to end on a small result set: every
+// result must gain a 2-dimensional "featureProjection" additional property,
+// while pre-existing additional properties are preserved.
+func TestProjector(t *testing.T) {
+	p := New()
+
+	t.Run("with multiple results", func(t *testing.T) {
+		vectors := [][]float32{
+			{1, 0, 0, 0, 0},
+			{0, 0, 1, 0, 0},
+			{1, 1, 1, 0, 0},
+		}
+
+		testData := []search.Result{
+			{
+				Schema: map[string]interface{}{"name": "item1"},
+				Vector: vectors[0],
+			},
+			{
+				Schema: map[string]interface{}{"name": "item2"},
+				Vector: vectors[1],
+			},
+			{
+				Schema: map[string]interface{}{"name": "item3"},
+				Vector: vectors[2],
+				AdditionalProperties: map[string]interface{}{
+					"classification": &additional.Classification{ // verify it doesn't remove existing additional props
+						ID: strfmt.UUID("123"),
+					},
+				},
+			},
+		}
+
+		res, err := p.Reduce(testData, &Params{})
+		require.Nil(t, err)
+		assert.Len(t, res, len(testData))
+		classification, classificationOK := res[2].AdditionalProperties["classification"]
+		assert.True(t, classificationOK)
+		classificationElement, classificationElementOK := classification.(*additional.Classification)
+		assert.True(t, classificationElementOK)
+		assert.Equal(t, classificationElement.ID, strfmt.UUID("123"),
+			"existing additionals should not be removed")
+		// each projected vector has length 2 — presumably the default
+		// Dimensions applied inside Reduce; confirm against projector.go
+		for i := 0; i < 3; i++ {
+			featureProjection, featureProjectionOK := res[i].AdditionalProperties["featureProjection"]
+			assert.True(t, featureProjectionOK)
+			fpElement, fpElementOK := featureProjection.(*FeatureProjection)
+			assert.True(t, fpElementOK)
+			assert.Len(t, fpElement.Vector, 2)
+		}
+	})
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/property_extract.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/property_extract.go
new file mode 100644
index 0000000000000000000000000000000000000000..ceb4068583582a447da56a5003bb3368efcb3d7d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/property_extract.go
@@ -0,0 +1,17 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package additional
+
+// PropertyExtractor is implemented by modules that need certain object
+// properties extracted from the binary object no matter whether the user
+// requested them.
+type PropertyExtractor interface {
+	// GetPropertiesToExtract lists the property names the module requires.
+	GetPropertiesToExtract() []string
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider.go
new file mode 100644
index 0000000000000000000000000000000000000000..50f02d7f09b5bebe0694a1c71a4e96ddcf787d2b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider.go
@@ -0,0 +1,32 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package additional
+
+import (
+ "context"
+
+ "github.com/weaviate/weaviate/entities/models"
+
+ "github.com/tailor-inc/graphql"
+ "github.com/tailor-inc/graphql/language/ast"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/search"
+)
+
+// AdditionalProperty describes one module-provided _additional property: how
+// it surfaces in GraphQL, how its arguments are extracted, and how it is
+// computed for a set of search results.
+type AdditionalProperty interface {
+	// AdditionalPropertyFn computes the property for the given results.
+	AdditionalPropertyFn(ctx context.Context,
+		in []search.Result, params interface{}, limit *int,
+		argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig) ([]search.Result, error)
+	// ExtractAdditionalFn parses the GraphQL arguments into module params.
+	ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{}
+	// AdditionalPropertyDefaultValue returns the params used when the
+	// property is requested without arguments.
+	AdditionalPropertyDefaultValue() interface{}
+	// AdditionalFieldFn builds the GraphQL field for the given class name.
+	AdditionalFieldFn(classname string) *graphql.Field
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_generative.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_generative.go
new file mode 100644
index 0000000000000000000000000000000000000000..365a83ab2c65d8a82cf83bb055ae429f9b5cfbd9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_generative.go
@@ -0,0 +1,52 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package additional
+
+import (
+ "github.com/sirupsen/logrus"
+
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ generativegenerate "github.com/weaviate/weaviate/usecases/modulecomponents/additional/generate"
+)
+
+const PropertyGenerate = "generate"
+
+// GraphQLAdditionalGenerativeProvider exposes the generic generate component
+// as the "generate" _additional GraphQL property.
+type GraphQLAdditionalGenerativeProvider struct {
+	generative AdditionalProperty
+}
+
+// NewGenericGenerativeProvider wires up a generic generate component backed
+// by the configured generative providers.
+func NewGenericGenerativeProvider(
+	className string,
+	additionalGenerativeParameters map[string]modulecapabilities.GenerativeProperty,
+	defaultProviderName string,
+	logger logrus.FieldLogger,
+) *GraphQLAdditionalGenerativeProvider {
+	generative := generativegenerate.NewGeneric(additionalGenerativeParameters, defaultProviderName, logger)
+	return &GraphQLAdditionalGenerativeProvider{generative: generative}
+}
+
+// AdditionalProperties lists the _additional properties contributed by this
+// provider; only "generate" is exposed.
+func (p *GraphQLAdditionalGenerativeProvider) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty {
+	return map[string]modulecapabilities.AdditionalProperty{
+		PropertyGenerate: p.getGenerate(),
+	}
+}
+
+// getGenerate assembles the capability descriptor for "generate": its GraphQL
+// name, field/extract functions, and the Explore Get/List search hooks.
+func (p *GraphQLAdditionalGenerativeProvider) getGenerate() modulecapabilities.AdditionalProperty {
+	return modulecapabilities.AdditionalProperty{
+		GraphQLNames:           []string{PropertyGenerate},
+		GraphQLFieldFunction:   p.generative.AdditionalFieldFn,
+		GraphQLExtractFunction: p.generative.ExtractAdditionalFn,
+		SearchFunctions: modulecapabilities.AdditionalSearch{
+			ExploreGet:  p.generative.AdditionalPropertyFn,
+			ExploreList: p.generative.AdditionalPropertyFn,
+		},
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_ranker.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_ranker.go
new file mode 100644
index 0000000000000000000000000000000000000000..6a78f81a28d21499809775033d828c5b95fe58f9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_ranker.go
@@ -0,0 +1,53 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package additional
+
+import (
+ "context"
+
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ rankerrank "github.com/weaviate/weaviate/usecases/modulecomponents/additional/rank"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/ent"
+)
+
+const PropertyRerank = "rerank"
+
+// reRankerClient is the consumer-side view of the rank client: it scores a
+// list of documents against a query.
+type reRankerClient interface {
+	Rank(ctx context.Context, query string, documents []string, cfg moduletools.ClassConfig) (*ent.RankResult, error)
+}
+
+// GraphQLAdditionalRankerProvider exposes the reranker as the "rerank"
+// _additional GraphQL property.
+type GraphQLAdditionalRankerProvider struct {
+	ReRankerProvider AdditionalProperty
+}
+
+// NewRankerProvider wraps the given client in a rank provider exposed as the
+// "rerank" _additional property.
+func NewRankerProvider(client reRankerClient) *GraphQLAdditionalRankerProvider {
+	return &GraphQLAdditionalRankerProvider{ReRankerProvider: rankerrank.New(client)}
+}
+
+// AdditionalProperties lists the _additional properties contributed by this
+// provider; only "rerank" is exposed.
+func (p *GraphQLAdditionalRankerProvider) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty {
+	return map[string]modulecapabilities.AdditionalProperty{
+		PropertyRerank: p.getReRanker(),
+	}
+}
+
+// getReRanker assembles the capability descriptor for "rerank": its GraphQL
+// name, field/extract functions, and the Explore Get/List search hooks.
+func (p *GraphQLAdditionalRankerProvider) getReRanker() modulecapabilities.AdditionalProperty {
+	return modulecapabilities.AdditionalProperty{
+		GraphQLNames:           []string{PropertyRerank},
+		GraphQLFieldFunction:   p.ReRankerProvider.AdditionalFieldFn,
+		GraphQLExtractFunction: p.ReRankerProvider.ExtractAdditionalFn,
+		SearchFunctions: modulecapabilities.AdditionalSearch{
+			ExploreGet:  p.ReRankerProvider.AdditionalPropertyFn,
+			ExploreList: p.ReRankerProvider.AdditionalPropertyFn,
+		},
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_text2vec.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_text2vec.go
new file mode 100644
index 0000000000000000000000000000000000000000..f93383ccb815559fc217c3497ada41b89d6918f4
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/provider_text2vec.go
@@ -0,0 +1,53 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package additional
+
+import (
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/additional/projector"
+)
+
+const PropertyFeatureProjection = "featureProjection"
+
+// GraphQLAdditionalArgumentsProvider exposes the feature projector as the
+// "featureProjection" _additional GraphQL property.
+type GraphQLAdditionalArgumentsProvider struct {
+	projector *projector.FeatureProjector
+}
+
+// NewText2VecProvider builds the provider backed by a fresh feature projector.
+func NewText2VecProvider() *GraphQLAdditionalArgumentsProvider {
+	return &GraphQLAdditionalArgumentsProvider{projector: projector.New()}
+}
+
+// AdditionalProperties lists the _additional properties contributed by this
+// provider; only "featureProjection" is exposed.
+func (p *GraphQLAdditionalArgumentsProvider) AdditionalProperties() map[string]modulecapabilities.AdditionalProperty {
+	return map[string]modulecapabilities.AdditionalProperty{
+		PropertyFeatureProjection: p.getFeatureProjection(),
+	}
+}
+
+// getFeatureProjection assembles the capability descriptor for
+// "featureProjection". Several REST spellings map to the same property, and
+// unlike the other providers it additionally serves plain object-list
+// requests (ObjectList hook).
+func (p *GraphQLAdditionalArgumentsProvider) getFeatureProjection() modulecapabilities.AdditionalProperty {
+	return modulecapabilities.AdditionalProperty{
+		RestNames: []string{
+			PropertyFeatureProjection,
+			"featureprojection",
+			"feature-projection",
+			"feature_projection",
+		},
+		DefaultValue:           p.projector.AdditionalPropertyDefaultValue(),
+		GraphQLNames:           []string{PropertyFeatureProjection},
+		GraphQLFieldFunction:   p.projector.AdditionalFeatureProjectionField,
+		GraphQLExtractFunction: p.projector.ExtractAdditionalFn,
+		SearchFunctions: modulecapabilities.AdditionalSearch{
+			ObjectList:  p.projector.AdditionalPropertyFn,
+			ExploreGet:  p.projector.AdditionalPropertyFn,
+			ExploreList: p.projector.AdditionalPropertyFn,
+		},
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank.go
new file mode 100644
index 0000000000000000000000000000000000000000..542912aadaa929692f4cd6504985c12252e8b0e5
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank.go
@@ -0,0 +1,60 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rank
+
+import (
+ "context"
+ "errors"
+
+ "github.com/weaviate/weaviate/entities/models"
+
+ "github.com/tailor-inc/graphql"
+ "github.com/tailor-inc/graphql/language/ast"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/search"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/ent"
+)
+
+// const maximumNumberOfGoroutines = 10
+// ReRankerClient scores a list of documents against a query via the module's
+// reranker service.
+type ReRankerClient interface {
+	Rank(ctx context.Context, query string, documents []string, cfg moduletools.ClassConfig) (*ent.RankResult, error)
+}
+
+// ReRankerProvider implements the "rerank" additional property on top of a
+// ReRankerClient.
+type ReRankerProvider struct {
+	client ReRankerClient
+}
+
+// New wraps the given reranker client in a ReRankerProvider.
+func New(reranker ReRankerClient) *ReRankerProvider {
+	return &ReRankerProvider{client: reranker}
+}
+
+// AdditionalPropertyDefaultValue returns the params used when "rerank" is
+// requested without arguments: an empty Params (no query, no property).
+func (p *ReRankerProvider) AdditionalPropertyDefaultValue() interface{} {
+	return &Params{}
+}
+
+// ExtractAdditionalFn parses the GraphQL "rerank" arguments into *Params;
+// the class argument is unused here.
+func (p *ReRankerProvider) ExtractAdditionalFn(param []*ast.Argument, class *models.Class) interface{} {
+	return p.parseReRankerArguments(param)
+}
+
+// AdditionalFieldFn builds the GraphQL field definition of "rerank" for the
+// given class.
+func (p *ReRankerProvider) AdditionalFieldFn(classname string) *graphql.Field {
+	return p.additionalReRankerField(classname)
+}
+
+// AdditionalPropertyFn computes the rerank scores for the given results; it
+// rejects params of any type other than *Params.
+func (p *ReRankerProvider) AdditionalPropertyFn(ctx context.Context,
+	in []search.Result, params interface{}, limit *int,
+	argumentModuleParams map[string]interface{}, cfg moduletools.ClassConfig,
+) ([]search.Result, error) {
+	parameters, ok := params.(*Params)
+	if !ok {
+		return nil, errors.New("wrong parameters")
+	}
+	return p.getScore(ctx, cfg, in, parameters)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf5955f70ae612e42dccce2cad96a0b38f551268
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field.go
@@ -0,0 +1,41 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rank
+
+import (
+ "fmt"
+
+ "github.com/tailor-inc/graphql"
+)
+
+// additionalReRankerField builds the GraphQL definition of the "rerank"
+// property for the given class: a list of {score: Float} objects,
+// parameterized by the optional "query" and "property" string arguments.
+func (p *ReRankerProvider) additionalReRankerField(classname string) *graphql.Field {
+	return &graphql.Field{
+		Args: graphql.FieldConfigArgument{
+			"query": &graphql.ArgumentConfig{
+				Description:  "Properties which contains text",
+				Type:         graphql.String,
+				DefaultValue: nil,
+			},
+			"property": &graphql.ArgumentConfig{
+				Description:  "Property to rank from",
+				Type:         graphql.String,
+				DefaultValue: nil,
+			},
+		},
+		// one result object per ranked document, carrying only the score
+		Type: graphql.NewList(graphql.NewObject(graphql.ObjectConfig{
+			Name: fmt.Sprintf("%sAdditionalReranker", classname),
+			Fields: graphql.Fields{
+				"score": &graphql.Field{Type: graphql.Float},
+			},
+		})),
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3561cb32b39c0ad1e7e0de678fecc489f60dc6c1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_graphql_field_test.go
@@ -0,0 +1,43 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rank
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/tailor-inc/graphql"
+)
+
+// Test_additionalCrossRankerField verifies the shape of the GraphQL "rerank"
+// field: a list of objects with a single Float "score" field, plus the
+// optional "query" and "property" arguments.
+func Test_additionalCrossRankerField(t *testing.T) {
+	// given
+	crossRankerProvider := &ReRankerProvider{}
+	classname := "Class"
+
+	// when
+	crossRanker := crossRankerProvider.additionalReRankerField(classname)
+
+	assert.NotNil(t, crossRanker)
+	assert.Equal(t, "ClassAdditionalReranker", crossRanker.Type.Name())
+	assert.NotNil(t, crossRanker.Type)
+	crossRankerObjectList, crossRankerObjectListOK := crossRanker.Type.(*graphql.List)
+	assert.True(t, crossRankerObjectListOK)
+	crossRankerObject, crossRankerObjectOK := crossRankerObjectList.OfType.(*graphql.Object)
+	assert.True(t, crossRankerObjectOK)
+	assert.Equal(t, 1, len(crossRankerObject.Fields()))
+	assert.NotNil(t, crossRankerObject.Fields()["score"])
+
+	assert.NotNil(t, crossRanker.Args)
+	assert.Equal(t, 2, len(crossRanker.Args))
+	assert.NotNil(t, crossRanker.Args["query"])
+	assert.NotNil(t, crossRanker.Args["property"])
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_params.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_params.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5bd6c1caa93fc3a4a5bd574d41e7af6dac2aec0
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/additional/rank/rank_params.go
@@ -0,0 +1,39 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package rank
+
+// Params carries the (optional) rerank arguments: the query to score against
+// and the object property whose text is ranked.
+type Params struct {
+	Property *string
+	Query    *string
+}
+
+// GetQuery returns the query string, or "" when unset.
+func (n Params) GetQuery() string {
+	if n.Query == nil {
+		return ""
+	}
+	return *n.Query
+}
+
+// GetProperty returns the property name, or "" when unset.
+func (n Params) GetProperty() string {
+	if n.Property == nil {
+		return ""
+	}
+	return *n.Property
+}
+
+// GetPropertiesToExtract reports which object properties the ranker needs:
+// at most the single configured property, otherwise an empty list.
+func (n Params) GetPropertiesToExtract() []string {
+	if property := n.GetProperty(); property != "" {
+		return []string{property}
+	}
+	return []string{}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/apikey/google.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/apikey/google.go
new file mode 100644
index 0000000000000000000000000000000000000000..e267ddd0f99797d1dd5b2ec7317378b8570de066
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/apikey/google.go
@@ -0,0 +1,103 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package apikey
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/weaviate/weaviate/usecases/modulecomponents"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+)
+
+// GoogleApiKey resolves the API key / access token used when talking to
+// Google endpoints. It caches the most recently fetched OAuth token; the
+// mutex guards that cache for concurrent use.
+type GoogleApiKey struct {
+	mutex sync.RWMutex
+	token *oauth2.Token
+}
+
+// NewGoogleApiKey returns an empty resolver; the token cache starts unset.
+func NewGoogleApiKey() *GoogleApiKey {
+	return new(GoogleApiKey)
+}
+
+// GetApiKey resolves the key to use, in priority order: endpoint-specific
+// request headers (Studio vs Vertex), generic request headers, Google default
+// credentials (Vertex AI with useGoogleAuth only), and finally the value
+// taken from the environment.
+func (g *GoogleApiKey) GetApiKey(ctx context.Context, envApiKeyValue string, useGenerativeAIEndpoint, useGoogleAuth bool) (string, error) {
+	// endpoint-specific headers are consulted before the generic ones;
+	// the order below mirrors the documented precedence
+	headers := []string{"X-Goog-Vertex-Api-Key", "X-Google-Vertex-Api-Key"}
+	if useGenerativeAIEndpoint {
+		headers = []string{"X-Goog-Studio-Api-Key", "X-Google-Studio-Api-Key"}
+	}
+	headers = append(headers, "X-Goog-Api-Key", "X-Palm-Api-Key", "X-Google-Api-Key")
+	for _, header := range headers {
+		if apiKey := modulecomponents.GetValueFromContext(ctx, header); apiKey != "" {
+			return apiKey, nil
+		}
+	}
+	if !useGenerativeAIEndpoint && useGoogleAuth {
+		return g.getAuthToken(ctx)
+	}
+	if envApiKeyValue != "" {
+		return envApiKeyValue, nil
+	}
+	return "", errors.New("no api key found " +
+		"neither in request header: X-Palm-Api-Key or X-Goog-Api-Key or X-Goog-Vertex-Api-Key or X-Goog-Studio-Api-Key " +
+		"nor in environment variable under PALM_APIKEY or GOOGLE_APIKEY")
+}
+
+// getAuthToken returns the cached access token while it is still valid,
+// otherwise fetches and caches a fresh one.
+func (g *GoogleApiKey) getAuthToken(ctx context.Context) (string, error) {
+	accessToken := g.getAccessToken()
+	if accessToken == "" {
+		return g.updateAndGetAccessToken(ctx)
+	}
+	return accessToken, nil
+}
+
+// updateAndGetAccessToken refreshes the cached OAuth token under the write
+// lock and returns its access token.
+//
+// It re-checks the cache after acquiring the lock: when several goroutines
+// race past the read-locked fast path, only the first performs the
+// (potentially slow) credential lookup and token fetch; the rest reuse the
+// token it stored.
+func (g *GoogleApiKey) updateAndGetAccessToken(ctx context.Context) (string, error) {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	// double-check: another goroutine may have refreshed the token while we
+	// were waiting for the write lock
+	if g.token != nil && g.token.Valid() {
+		return g.token.AccessToken, nil
+	}
+	// This method checks all possible places for Google credentials and if successful gets the token source
+	// It should only be used with Vertex AI models
+	// Uses scope: https://cloud.google.com/iam/docs/create-short-lived-credentials-direct
+	tokenSource, err := google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform")
+	if err != nil {
+		return "", fmt.Errorf("unable to find Google credentials: %w", err)
+	}
+	token, err := tokenSource.Token()
+	if err != nil {
+		return "", fmt.Errorf("unable to obtain Google token: %w", err)
+	}
+	g.token = token
+	return token.AccessToken, nil
+}
+
+// getAccessToken returns the cached token's access string, or "" when no
+// valid token is cached. Concurrency-safe via the read lock.
+func (g *GoogleApiKey) getAccessToken() string {
+	g.mutex.RLock()
+	defer g.mutex.RUnlock()
+
+	if g.token == nil || !g.token.Valid() {
+		return ""
+	}
+	return g.token.AccessToken
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch.go
new file mode 100644
index 0000000000000000000000000000000000000000..a68fe7bbcaf5af1dcf626c188fda710e0e46fa02
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch.go
@@ -0,0 +1,563 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package batch
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/weaviate/weaviate/usecases/modulecomponents"
+ "github.com/weaviate/weaviate/usecases/monitoring"
+
+ "github.com/pkg/errors"
+ objectsvectorizer "github.com/weaviate/weaviate/usecases/modulecomponents/vectorizer"
+
+ "github.com/sirupsen/logrus"
+ "github.com/weaviate/weaviate/entities/dto"
+ enterrors "github.com/weaviate/weaviate/entities/errors"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+)
+
+var _NUMCPU = runtime.GOMAXPROCS(0)
+
+const BatchChannelSize = 100
+
+// BatchJob bundles one user-submitted batch for the background worker: the
+// inputs to vectorize plus the shared slots (vecs, errs) the worker writes
+// results into. The submitter blocks on wg until the worker calls Done.
+type BatchJob[T dto.Embedding] struct {
+	texts      []string // one entry per object to vectorize
+	tokens     []int    // estimated token count per text, parallel to texts
+	ctx        context.Context
+	wg         *sync.WaitGroup // signalled exactly once when the whole job finished
+	errs       map[int]error   // per-object errors, keyed by index into texts
+	cfg        moduletools.ClassConfig
+	vecs       []T    // result vectors, parallel to texts
+	skipObject []bool // true entries are never sent to the vectorizer
+	startTime  time.Time
+	apiKeyHash [32]byte // selects the per-API-key rate limit state
+	tokenSum   int      // sum over tokens, used to reserve rate-limit budget
+}
+
+// copy returns a shallow copy of the job. The value receiver is already an
+// independent shallow copy of every field, so returning it is equivalent to
+// copying field by field; slices, maps and pointers still alias the original's.
+func (b BatchJob[T]) copy() BatchJob[T] {
+	return b
+}
+
+// BatchClient is the provider-specific vectorizer the batch worker talks to.
+// Vectorize embeds a slice of texts and may return updated rate limits and the
+// token count the provider reported for the request.
+type BatchClient[T dto.Embedding] interface {
+	Vectorize(ctx context.Context, input []string,
+		config moduletools.ClassConfig) (*modulecomponents.VectorizationResult[T], *modulecomponents.RateLimits, int, error)
+	GetVectorizerRateLimit(ctx context.Context, config moduletools.ClassConfig) *modulecomponents.RateLimits
+	GetApiKeyHash(ctx context.Context, config moduletools.ClassConfig) [32]byte
+}
+
+// NewBatchVectorizer constructs a Batch for the given client/settings and
+// starts its single background worker goroutine, which consumes jobQueueCh
+// for the lifetime of the process.
+func NewBatchVectorizer[T dto.Embedding](client BatchClient[T], maxBatchTime time.Duration, settings Settings, logger logrus.FieldLogger, label string) *Batch[T] {
+	b := &Batch[T]{
+		client:            client,
+		objectVectorizer:  objectsvectorizer.New(),
+		jobQueueCh:        make(chan BatchJob[T], BatchChannelSize),
+		rateLimitChannel:  make(chan rateLimitJob, BatchChannelSize),
+		endOfBatchChannel: make(chan endOfBatchJob, BatchChannelSize),
+		maxBatchTime:      maxBatchTime,
+		settings:          settings,
+		concurrentBatches: atomic.Int32{},
+		logger:            logger,
+		Label:             label,
+	}
+
+	enterrors.GoWrapper(func() { b.batchWorker() }, logger)
+	return b
+}
+
+// rateLimitJob carries a fresh rate-limit snapshot from a finished request
+// back to the worker loop, tagged with the API key it belongs to.
+type rateLimitJob struct {
+	rateLimit  *modulecomponents.RateLimits
+	apiKeyHash [32]byte
+}
+
+// endOfBatchJob reports a completed batch back to the worker loop so it can
+// release the budget that was reserved for a concurrent batch and refresh its
+// throughput estimates.
+type endOfBatchJob struct {
+	timePerToken      float64 // observed seconds per token, used to size future batches
+	objectsPerRequest int     // observed objects per request (0 if no request was sent)
+	reservedTokens    int     // tokens reserved up front for this batch
+	reservedReqs      int     // requests reserved up front for this batch
+	actualTokens      int     // tokens actually consumed
+	actualReqs        int     // requests actually sent
+	apiKeyHash        [32]byte
+	concurrentBatch   bool // only concurrent batches reserved budget to release
+}
+
+// Batch schedules vectorization jobs against a provider while observing its
+// rate limits. Jobs enter through jobQueueCh; a single worker goroutine
+// (batchWorker) decides whether to run them sequentially or concurrently.
+type Batch[T dto.Embedding] struct {
+	client            BatchClient[T]
+	objectVectorizer  *objectsvectorizer.ObjectVectorizer
+	jobQueueCh        chan BatchJob[T] // submitted jobs, consumed by batchWorker
+	maxBatchTime      time.Duration    // upper bound on how long one job may take overall
+	settings          Settings
+	rateLimitChannel  chan rateLimitJob  // rate-limit updates from in-flight requests
+	endOfBatchChannel chan endOfBatchJob // completion reports from sendBatch
+	concurrentBatches atomic.Int32       // number of batches currently in flight
+	logger            logrus.FieldLogger
+	Label             string // metrics label identifying the module
+}
+
+// batchWorker is a go routine that handles the communication with the vectorizer
+//
+// On the high level it has the following steps:
+// 1. It receives a batch job
+// 2. It splits the job into smaller vectorizer-batches if the token limit is reached. Note that objects from different
+// batches are not mixed with each other to simplify returning the vectors.
+// 3. It sends the smaller batches to the vectorizer
+func (b *Batch[T]) batchWorker() {
+	timePerToken := 0.0
+	objectsPerBatch := b.settings.MaxObjectsPerBatch
+
+	// per-API-key rate-limit state; entries are pointers shared with in-flight
+	// batches, so they must be updated in place rather than replaced
+	rateLimitPerApiKey := make(map[[32]byte]*modulecomponents.RateLimits)
+
+	// the total batch must not take longer than b.maxBatchTime to avoid client timeouts
+	for job := range b.jobQueueCh {
+		// observe how long the batch was in the queue waiting for processing
+		durWaiting := time.Since(job.startTime).Seconds()
+		monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(b.Label, "waiting_for_processing").
+			Observe(durWaiting)
+
+		startProcessingTime := time.Now()
+
+		// check if we already have rate limits for the current api key and reuse them if possible
+		// Note that the rateLimit is a pointer and should only be updated in place and not replaced with a new object
+		// as otherwise any changes are lost
+		rateLimit, ok := rateLimitPerApiKey[job.apiKeyHash]
+		if !ok {
+			rateLimit = b.client.GetVectorizerRateLimit(job.ctx, job.cfg)
+			rateLimitPerApiKey[job.apiKeyHash] = rateLimit
+		}
+		rateLimit.CheckForReset()
+
+		objCounter := 0
+
+		// If the user does not supply rate limits, and we do not have defaults for the provider we don't know the
+		// rate limits without a request => send a small one. This currently only affects OpenAI.
+		// Each single-object probe either succeeds (initializing the limits) or records its error and moves on.
+		for objCounter < len(job.texts) && rateLimit.IsInitialized() {
+			var err error
+			if !job.skipObject[objCounter] {
+				_, err = b.makeRequest(job, job.texts[objCounter:objCounter+1], job.cfg, []int{objCounter}, rateLimit, job.tokens[objCounter])
+				if err != nil {
+					job.errs[objCounter] = err
+					objCounter++
+					continue
+				}
+			}
+			objCounter++
+		}
+
+		// if we have a high rate limit we can send multiple batches in parallel.
+		//
+		// If the rate limit is high enough to "fit" the current batch, we send it concurrently. If not, we wait for
+		// either
+		// - the rate limit to refresh, so we can schedule another concurrent batch
+		// - the current batch to finish, so the next batch can be sent sequentially
+		//
+		// While using the same code both modes are working slightly different:
+		// - For concurrent batching, the amount of used tokens/requests is reserved as long as the batch is running
+		//   and is cleared when it finishes. This ensures that we never exceed the rate limit and don't need to check
+		//   the rate limit in the sendBatch function (we use a dummy that never fails a check). All updates happen
+		//   in the main loop.
+		// - For sequential batching, the rate limit will be passed into the sendBatch function and is observed and
+		//   updated there. This allows to use the rate-limit in an optimal way, but also requires more checks. No
+		//   concurrent batch can be started while a sequential batch is running.
+		repeats := 0
+		for {
+			timePerToken, objectsPerBatch = b.updateState(rateLimitPerApiKey, timePerToken, objectsPerBatch)
+			expectedNumRequests := 1 + int(1.25*float32(len(job.texts)))/objectsPerBatch // round up to be on the safe side
+
+			stats := monitoring.GetMetrics().T2VRateLimitStats
+			stats.WithLabelValues(b.Label, "token_limit").Set(float64(rateLimit.LimitTokens))
+			stats.WithLabelValues(b.Label, "token_remaining").Set(float64(rateLimit.RemainingTokens))
+			stats.WithLabelValues(b.Label, "token_reserved").Set(float64(rateLimit.ReservedTokens))
+			stats.WithLabelValues(b.Label, "request_limit").Set(float64(rateLimit.LimitRequests))
+			stats.WithLabelValues(b.Label, "request_remaining").Set(float64(rateLimit.RemainingRequests))
+			stats.WithLabelValues(b.Label, "request_reserved").Set(float64(rateLimit.ReservedRequests))
+			stats.WithLabelValues(b.Label, "estimated_requests_needed").Set(float64(expectedNumRequests))
+			stats.WithLabelValues(b.Label, "tokens_needed").Set(float64(job.tokenSum))
+			stats.WithLabelValues(b.Label, "concurrent_batches").Set(float64(b.concurrentBatches.Load()))
+			stats.WithLabelValues(b.Label, "repeats_for_scheduling").Set(float64(repeats))
+
+			if rateLimit.CanSendFullBatch(expectedNumRequests, job.tokenSum, repeats > 0, b.Label) {
+				b.concurrentBatches.Add(1)
+				monitoring.GetMetrics().T2VBatches.WithLabelValues(b.Label).Inc()
+				jobCopy := job.copy()
+				rateLimit.ReservedRequests += expectedNumRequests
+				rateLimit.ReservedTokens += job.tokenSum
+
+				// necessary, because the outer loop can modify these values through b.updateState while the goroutine
+				// is accessing them => race
+				timePerToken := timePerToken
+				expectedNumRequests := expectedNumRequests
+				enterrors.GoWrapper(func() {
+					b.sendBatch(jobCopy, objCounter, dummyRateLimit(), timePerToken, expectedNumRequests, true)
+					monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(b.Label, "processing_async").
+						Observe(time.Since(startProcessingTime).Seconds())
+				}, b.logger)
+				break
+			} else if b.concurrentBatches.Load() < 1 {
+				b.concurrentBatches.Add(1)
+
+				monitoring.GetMetrics().T2VBatches.WithLabelValues(b.Label).Inc()
+				// block so no concurrent batch can be sent
+				b.sendBatch(job, objCounter, rateLimit, timePerToken, 0, false)
+				monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(b.Label, "processing_sync").
+					Observe(time.Since(startProcessingTime).Seconds())
+				break
+			}
+			// neither concurrent nor sequential dispatch is possible right now:
+			// back off briefly and re-evaluate the rate limit
+			time.Sleep(100 * time.Millisecond)
+			repeats++
+		}
+	}
+}
+
+// updateState collects the latest updates from finished batches: it drains the
+// rate-limit and end-of-batch channels (non-blocking) and returns refreshed
+// timePerToken / objectsPerBatch estimates for the scheduler.
+func (b *Batch[T]) updateState(rateLimits map[[32]byte]*modulecomponents.RateLimits, timePerToken float64, objectsPerBatch int) (float64, int) {
+	for _, rateLimit := range rateLimits {
+		rateLimit.CheckForReset()
+	}
+
+	// read all values from the channel and only keep the freshest one. This is needed as openAI returns the current
+	// rate limit with every request, and we need to keep the freshest one to get an overview of where we are
+rateLimitLoop:
+	for {
+		select {
+		case rateLimitEntry := <-b.rateLimitChannel:
+			// NOTE(review): assumes an entry for this apiKeyHash already exists
+			// (batchWorker inserts it before any request is sent); a missing
+			// entry would nil-panic here — confirm no other producer exists.
+			old := rateLimits[rateLimitEntry.apiKeyHash]
+			old.UpdateWithRateLimit(rateLimitEntry.rateLimit)
+			rateLimits[rateLimitEntry.apiKeyHash] = old
+		default:
+			break rateLimitLoop
+		}
+	}
+
+timeLoop:
+	for {
+		select {
+		case endOfBatch := <-b.endOfBatchChannel:
+			timePerToken = endOfBatch.timePerToken
+			if endOfBatch.objectsPerRequest > 0 {
+				objectsPerBatch = endOfBatch.objectsPerRequest
+			}
+
+			// if we have a concurrent batch we need to remove the reserved tokens from the rate limit
+			if endOfBatch.concurrentBatch {
+				rateLimits[endOfBatch.apiKeyHash].ReservedTokens -= endOfBatch.reservedTokens
+				rateLimits[endOfBatch.apiKeyHash].ReservedRequests -= endOfBatch.reservedReqs
+				// providers that don't report remaining budget need it decremented locally
+				if !b.settings.ReturnsRateLimit {
+					rateLimits[endOfBatch.apiKeyHash].RemainingTokens -= endOfBatch.actualTokens
+					rateLimits[endOfBatch.apiKeyHash].RemainingRequests -= endOfBatch.actualReqs
+				}
+			}
+
+		default:
+			break timeLoop
+		}
+	}
+	return timePerToken, objectsPerBatch
+}
+
+// sendBatch vectorizes job.texts[objCounter:], accumulating objects into
+// requests that fit the rate limit, the per-batch token cap, the object cap
+// and the time budget. It records per-object results into the job, reports
+// completion via endOfBatchChannel, and finally releases the job's WaitGroup.
+//
+// For sequential batches rateLimit is the live, shared limit; for concurrent
+// batches it is a permissive dummy because the budget was reserved up front
+// by batchWorker. reservedReqs is only meaningful for concurrent batches.
+func (b *Batch[T]) sendBatch(job BatchJob[T], objCounter int, rateLimit *modulecomponents.RateLimits, timePerToken float64, reservedReqs int, concurrentBatch bool) {
+	maxTokensPerBatch := b.settings.MaxTokensPerBatch(job.cfg)
+	estimatedTokensInCurrentBatch := 0
+	numRequests := 0
+	numSendObjects := 0
+	actualTokensUsed := 0
+
+	texts := make([]string, 0, 100)
+	origIndex := make([]int, 0, 100)
+
+	for objCounter < len(job.texts) {
+		// on context expiry, fail all remaining (non-skipped) objects and stop
+		if job.ctx.Err() != nil {
+			for j := objCounter; j < len(job.texts); j++ {
+				if !job.skipObject[j] {
+					switch job.ctx.Err() {
+					case context.Canceled:
+						job.errs[j] = fmt.Errorf("context cancelled")
+					case context.DeadlineExceeded:
+						job.errs[j] = fmt.Errorf("context deadline exceeded")
+					default:
+						// this should not happen but we need to handle it
+						job.errs[j] = fmt.Errorf("context error: %w", job.ctx.Err())
+					}
+				}
+			}
+			break
+		}
+
+		if job.skipObject[objCounter] {
+			objCounter++
+			continue
+		}
+
+		// add objects to the current vectorizer-batch until the remaining tokens are used up or other limits are reached
+		text := job.texts[objCounter]
+		if float32(estimatedTokensInCurrentBatch+job.tokens[objCounter]) <= 0.95*float32(rateLimit.RemainingTokens) &&
+			float32(estimatedTokensInCurrentBatch+job.tokens[objCounter]) <= 0.95*float32(maxTokensPerBatch) &&
+			(timePerToken*float64(estimatedTokensInCurrentBatch) < b.settings.MaxTimePerBatch) &&
+			len(texts) < b.settings.MaxObjectsPerBatch {
+			estimatedTokensInCurrentBatch += job.tokens[objCounter]
+			texts = append(texts, text)
+			origIndex = append(origIndex, objCounter)
+			objCounter++
+			// fall through (no continue) on the last object so the final
+			// request below is still sent
+			if objCounter < len(job.texts) {
+				continue
+			}
+		}
+
+		// if a single object is larger than the current token limit it will fail all tests above. Then we need to either
+		// - wait until the token limit refreshes. This assumes that the tokenLimit refreshes linearly which is true
+		//   for openAI, but needs to be checked for other providers
+		// - send it anyway and let the provider fail it
+		if len(texts) == 0 {
+			fractionOfTotalLimit := float64(job.tokens[objCounter]) / float64(rateLimit.LimitTokens)
+			sleepTime := time.Duration(fractionOfTotalLimit * float64(time.Until(rateLimit.ResetTokens)))
+			// Only sleep if values are reasonable, e.g. for the token counter is lower than the limit token and we do
+			// not blow up the sleep time
+			if sleepTime > 0 && fractionOfTotalLimit < 1 && time.Since(job.startTime)+sleepTime < b.maxBatchTime && !concurrentBatch {
+				time.Sleep(sleepTime)
+				rateLimit.RemainingTokens += int(float64(rateLimit.LimitTokens) * fractionOfTotalLimit)
+				continue // try again after tokens have hopefully refreshed
+			} else {
+				// send the item in an individual request even if it is larger than the absolute token limit. It needs
+				// to fail to propagate the proper error to the user - also our tokenCounts are approximations so even if
+				// an objects seems to be too big it might as well work
+				texts = append(texts, text)
+				origIndex = append(origIndex, objCounter)
+				estimatedTokensInCurrentBatch += job.tokens[objCounter]
+				objCounter++
+			}
+		}
+
+		start := time.Now()
+		// request-level errors are already recorded per object inside makeRequest
+		actualTokensUsedInReq, _ := b.makeRequest(job, texts, job.cfg, origIndex, rateLimit, estimatedTokensInCurrentBatch)
+		actualTokensUsed += actualTokensUsedInReq
+		batchTookInS := time.Since(start).Seconds()
+		if estimatedTokensInCurrentBatch > 0 {
+			timePerToken = batchTookInS / float64(estimatedTokensInCurrentBatch)
+		}
+		numRequests += 1
+		numSendObjects += len(texts)
+
+		// in case of low rate limits we should not send the next batch immediately but sleep a bit
+		batchesPerMinute := 61.0 / batchTookInS
+		if batchesPerMinute > float64(rateLimit.LimitRequests) {
+			sleepFor := time.Duration((60.0-batchTookInS*float64(rateLimit.LimitRequests))/float64(rateLimit.LimitRequests)) * time.Second
+			// limit for how long we sleep to avoid deadlocks. This can happen if we get values from the vectorizer that
+			// should not happen such as the LimitRequests being 0
+			time.Sleep(min(b.maxBatchTime/2, sleepFor))
+
+			// adapt the batches per limit
+			batchesPerMinute = float64(rateLimit.LimitRequests)
+		}
+		if batchesPerMinute*float64(estimatedTokensInCurrentBatch) > float64(rateLimit.LimitTokens) {
+			sleepFor := batchTookInS * (batchesPerMinute*float64(estimatedTokensInCurrentBatch) - float64(rateLimit.LimitTokens)) / float64(rateLimit.LimitTokens)
+			// limit for how long we sleep to avoid deadlocks. This can happen if we get values from the vectorizer that
+			// should not happen such as the LimitTokens being 0
+			sleepTime := min(b.maxBatchTime/2, time.Duration(sleepFor*float64(time.Second)))
+			time.Sleep(sleepTime)
+		}
+
+		// not all request limits are included in "RemainingRequests" and "ResetRequests". For example, in the OpenAI
+		// free tier only the RPD limits are shown but not RPM
+		if rateLimit.RemainingRequests <= 0 && time.Until(rateLimit.ResetRequests) > 0 {
+			// if we need to wait more than MaxBatchTime for a reset we need to stop the batch to not produce timeouts
+			if time.Since(job.startTime)+time.Until(rateLimit.ResetRequests) > b.maxBatchTime {
+				for j := origIndex[0]; j < len(job.texts); j++ {
+					if !job.skipObject[j] {
+						job.errs[j] = errors.New("request rate limit exceeded and will not refresh in time")
+					}
+				}
+				break
+			}
+			time.Sleep(time.Until(rateLimit.ResetRequests))
+		}
+
+		// reset for next vectorizer-batch
+		estimatedTokensInCurrentBatch = 0
+		texts = texts[:0]
+		origIndex = origIndex[:0]
+	}
+
+	// in case we exit the loop without sending the last batch. This can happen when the last object is a skip or
+	// is too long
+	if len(texts) > 0 && objCounter == len(job.texts) {
+		actualTokensUsedInReq, _ := b.makeRequest(job, texts, job.cfg, origIndex, rateLimit, estimatedTokensInCurrentBatch)
+		actualTokensUsed += actualTokensUsedInReq
+	}
+	objectsPerRequest := 0
+	if numRequests > 0 {
+		objectsPerRequest = numSendObjects / numRequests
+	}
+	monitoring.GetMetrics().T2VRequestsPerBatch.WithLabelValues(b.Label).Observe(float64(numRequests))
+	b.endOfBatchChannel <- endOfBatchJob{
+		timePerToken:      timePerToken,
+		objectsPerRequest: objectsPerRequest,
+		reservedTokens:    job.tokenSum,
+		reservedReqs:      reservedReqs,
+		actualTokens:      actualTokensUsed,
+		actualReqs:        numRequests,
+		apiKeyHash:        job.apiKeyHash,
+		concurrentBatch:   concurrentBatch,
+	}
+	job.wg.Done()
+	b.concurrentBatches.Add(-1)
+	monitoring.GetMetrics().T2VBatches.WithLabelValues(b.Label).Dec()
+}
+
+// makeRequest sends one vectorization request for texts and records per-object
+// results: vectors into job.vecs and failures into job.errs, keyed by the
+// parallel origIndex slice. Fresh rate-limit information is applied in place
+// and forwarded to the worker loop; providers that only have a token budget
+// (HasTokenLimit) get it decremented locally instead.
+//
+// Returns the provider-reported token usage (negative when unknown) and the
+// request-level error, which has already been recorded in job.errs.
+func (b *Batch[T]) makeRequest(job BatchJob[T], texts []string, cfg moduletools.ClassConfig, origIndex []int, rateLimit *modulecomponents.RateLimits, tokensInCurrentBatch int) (int, error) {
+	beforeRequest := time.Now()
+	defer func() {
+		monitoring.GetMetrics().T2VRequestDuration.WithLabelValues(b.Label).
+			Observe(time.Since(beforeRequest).Seconds())
+	}()
+
+	monitoring.GetMetrics().T2VTokensInRequest.WithLabelValues(b.Label).
+		Observe(float64(tokensInCurrentBatch))
+
+	res, rateLimitNew, tokensUsed, err := b.client.Vectorize(job.ctx, texts, cfg)
+
+	if err != nil {
+		// request-level failure: every object in this request gets the error
+		b.logger.WithField("class", job.cfg.Class()).WithError(err).Debug("vectorization failed")
+		monitoring.GetMetrics().ModuleBatchError.WithLabelValues("batchVectorize", b.Label).Inc()
+		for j := 0; j < len(texts); j++ {
+			job.errs[origIndex[j]] = err
+		}
+	} else {
+		// per-object outcome: either an individual error or a vector
+		for j := 0; j < len(texts); j++ {
+			if res.Errors != nil && res.Errors[j] != nil {
+				job.errs[origIndex[j]] = res.Errors[j]
+			} else {
+				job.vecs[origIndex[j]] = res.Vector[j]
+			}
+		}
+	}
+	if rateLimitNew != nil {
+		rateLimit.UpdateWithRateLimit(rateLimitNew)
+		b.rateLimitChannel <- rateLimitJob{rateLimit: rateLimitNew, apiKeyHash: job.apiKeyHash}
+	} else if b.settings.HasTokenLimit {
+		// prefer the provider's actual token count over our estimate when available
+		if tokensUsed > -1 {
+			tokensInCurrentBatch = tokensUsed
+		}
+		rateLimit.ResetAfterRequestFunction(tokensInCurrentBatch)
+	}
+	return tokensUsed, err
+}
+
+// SubmitBatchAndWait enqueues one batch job for the background worker and
+// blocks until it has been fully processed. texts, tokenCounts and skipObject
+// are parallel slices; the result holds one vector per input (zero value for
+// skipped or failed objects) plus a map of per-object errors keyed by index.
+func (b *Batch[T]) SubmitBatchAndWait(ctx context.Context, cfg moduletools.ClassConfig, skipObject []bool, tokenCounts []int, texts []string) ([]T, map[int]error) {
+	vecs := make([]T, len(skipObject))
+	errs := make(map[int]error)
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+
+	// total token estimate, used by the worker to reserve rate-limit budget
+	tokenSum := 0
+	for i := range tokenCounts {
+		tokenSum += tokenCounts[i]
+	}
+
+	monitoring.GetMetrics().T2VTokensInBatch.WithLabelValues(b.Label).
+		Observe(float64(tokenSum))
+
+	beforeEnqueue := time.Now()
+	b.jobQueueCh <- BatchJob[T]{
+		ctx:        ctx,
+		wg:         &wg,
+		errs:       errs,
+		cfg:        cfg,
+		texts:      texts,
+		tokens:     tokenCounts,
+		vecs:       vecs,
+		skipObject: skipObject,
+		apiKeyHash: b.client.GetApiKeyHash(ctx, cfg),
+		startTime:  time.Now(),
+		tokenSum:   tokenSum,
+	}
+
+	// observe enqueue duration
+	monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(b.Label, "enqueue").
+		Observe(time.Since(beforeEnqueue).Seconds())
+
+	// the worker signals Done exactly once when the whole job is finished
+	wg.Wait()
+
+	// observe total duration
+	monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(b.Label, "total").
+		Observe(time.Since(beforeEnqueue).Seconds())
+	return vecs, errs
+}
+
+// objectVectorizer vectorizes a single object for a given class config.
+type objectVectorizer[T []float32 | [][]float32] func(context.Context, *models.Object, moduletools.ClassConfig) (T, models.AdditionalProperties, error)
+
+// VectorizeBatch vectorizes objs concurrently (bounded at 2x GOMAXPROCS) using
+// the supplied per-object vectorizer. skipObject is parallel to objs; skipped
+// entries keep their zero-value vector. Returns the vectors, nil additional
+// properties, and per-object errors keyed by index. If the group itself fails
+// (NOTE(review): presumably a recovered panic inside the wrapper — confirm
+// enterrors semantics), every non-skipped object gets that error and the
+// vectors are discarded.
+func VectorizeBatch[T []float32 | [][]float32](ctx context.Context, objs []*models.Object, skipObject []bool, cfg moduletools.ClassConfig, logger logrus.FieldLogger, objectVectorizer objectVectorizer[T]) ([]T, []models.AdditionalProperties, map[int]error) {
+	vecs := make([]T, len(objs))
+	// error should be the exception so dont preallocate
+	errs := make(map[int]error)
+	errorLock := sync.Mutex{}
+
+	// error group is used to limit concurrency
+	eg := enterrors.NewErrorGroupWrapper(logger)
+	eg.SetLimit(_NUMCPU * 2)
+	for i := range objs {
+		i := i // shadow for pre-1.22 loop-variable capture semantics
+
+		if skipObject[i] {
+			continue
+		}
+		eg.Go(func() error {
+			vec, _, err := objectVectorizer(ctx, objs[i], cfg)
+			if err != nil {
+				// errs is shared across workers; guard the write
+				errorLock.Lock()
+				defer errorLock.Unlock()
+				errs[i] = err
+			}
+			// vecs[i] is only ever written by this goroutine, so no lock needed
+			vecs[i] = vec
+			return nil
+		})
+	}
+	err := eg.Wait()
+	if err != nil {
+		for i := range objs {
+			if skipObject[i] {
+				continue
+			}
+			errs[i] = err
+		}
+		return nil, nil, errs
+	}
+	return vecs, nil, errs
+}
+
+// dummyRateLimit returns a permissive rate limit whose checks effectively
+// never fail. It is handed to sendBatch for concurrent batches, whose real
+// budget was already reserved up front by batchWorker.
+func dummyRateLimit() *modulecomponents.RateLimits {
+	return &modulecomponents.RateLimits{
+		LimitRequests:        1000000,
+		LimitTokens:          1000000,
+		RemainingRequests:    1000000,
+		RemainingTokens:      1000000,
+		ResetRequests:        time.Now(),
+		ResetTokens:          time.Now(),
+		AfterRequestFunction: func(limits *modulecomponents.RateLimits, tokensUsed int, deductRequest bool) {},
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_config.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_config.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a2d962421ccd0da6adb16240df80f1e323a52b1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_config.go
@@ -0,0 +1,23 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package batch
+
+import "github.com/weaviate/weaviate/entities/moduletools"
+
+// Settings configures provider-specific batching behaviour for the Batch worker.
+type Settings struct {
+	// TokenMultiplier — NOTE(review): not referenced in batch.go; presumably a
+	// token-estimate scaling factor used by callers — confirm at call sites.
+	TokenMultiplier float32
+	// MaxTimePerBatch is the soft cap (in seconds) on the estimated duration of
+	// a single vectorizer request.
+	MaxTimePerBatch float64
+	// MaxObjectsPerBatch caps how many objects go into one request.
+	MaxObjectsPerBatch int
+	// MaxTokensPerBatch returns the per-class token cap for one request.
+	MaxTokensPerBatch func(cfg moduletools.ClassConfig) int
+	// HasTokenLimit marks providers with a token budget that must be decremented
+	// locally after each request.
+	HasTokenLimit bool
+	// ReturnsRateLimit marks providers that report their remaining budget with
+	// each response; otherwise the worker decrements it locally.
+	ReturnsRateLimit bool
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bc64e0a958d2f291607bea884ce9caca838b462
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/batch_test.go
@@ -0,0 +1,385 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package batch
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/weaviate/tiktoken-go"
+
+ "github.com/weaviate/weaviate/usecases/modulecomponents"
+
+ "github.com/weaviate/weaviate/entities/moduletools"
+
+ "github.com/sirupsen/logrus/hooks/test"
+
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/models"
+)
+
+func maxTokensPerBatch(cfg moduletools.ClassConfig) int {
+ return 100
+}
+
+// TestBatch drives SubmitBatchAndWait through a table of scenarios: skipped
+// objects, per-item and request-level errors, token-limit-driven splitting
+// into several provider batches, over-long single items, and context
+// deadlines. The fake client parses magic command strings embedded in the
+// "test" property ("error <msg>", "tokens <n>", "wait <ms>", "ReqError <msg>")
+// to manipulate its simulated rate-limit state.
+func TestBatch(t *testing.T) {
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	logger, _ := test.NewNullLogger()
+	cases := []struct {
+		name       string
+		objects    []*models.Object
+		skip       []bool
+		wantErrors map[int]error // expected error per object index; absent index => vector expected
+		deadline   time.Duration // optional override of the default 10s context deadline
+	}{
+		{name: "skip all", objects: []*models.Object{{Class: "Car"}}, skip: []bool{true}},
+		{name: "skip first", objects: []*models.Object{{Class: "Car"}, {Class: "Car", Properties: map[string]interface{}{"test": "test"}}}, skip: []bool{true, false}},
+		{name: "one object errors", objects: []*models.Object{{Class: "Car", Properties: map[string]interface{}{"test": "test"}}, {Class: "Car", Properties: map[string]interface{}{"test": "error something"}}}, skip: []bool{false, false}, wantErrors: map[int]error{1: fmt.Errorf("something")}},
+		{name: "first object errors", objects: []*models.Object{{Class: "Car", Properties: map[string]interface{}{"test": "error something"}}, {Class: "Car", Properties: map[string]interface{}{"test": "test"}}}, skip: []bool{false, false}, wantErrors: map[int]error{0: fmt.Errorf("something")}},
+		{name: "vectorize all", objects: []*models.Object{{Class: "Car", Properties: map[string]interface{}{"test": "test"}}, {Class: "Car", Properties: map[string]interface{}{"test": "something"}}}, skip: []bool{false, false}},
+		{name: "multiple vectorizer batches", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "tokens 80"}}, // set limit so next 3 objects are one batch
+			{Class: "Car", Properties: map[string]interface{}{"test": "first object first batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "second object first batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "third object first batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "first object second batch"}}, // rate is 100 again
+			{Class: "Car", Properties: map[string]interface{}{"test": "second object second batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "third object second batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "fourth object second batch"}},
+		}, skip: []bool{false, false, false, false, false, false, false, false}},
+		{name: "multiple vectorizer batches with skips and errors", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "tokens 70"}}, // set limit so next 3 objects are one batch
+			{Class: "Car", Properties: map[string]interface{}{"test": "first object first batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "second object first batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "error something"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "first object second batch"}}, // rate is 100 again
+			{Class: "Car", Properties: map[string]interface{}{"test": "second object second batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "third object second batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "fourth object second batch"}},
+		}, skip: []bool{false, true, false, false, false, true, false, false}, wantErrors: map[int]error{3: fmt.Errorf("something")}},
+		{name: "token too long", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "tokens 10"}}, // set limit
+			{Class: "Car", Properties: map[string]interface{}{"test": "long long long long, long, long, long, long"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "short"}},
+		}, skip: []bool{false, false, false}, wantErrors: map[int]error{1: fmt.Errorf("text too long for vectorization from provider: got 43, total limit: 20, remaining: 10")}},
+		{name: "token too long, last item in batch", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "tokens 10"}}, // set limit
+			{Class: "Car", Properties: map[string]interface{}{"test": "short"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "long long long long, long, long, long, long"}},
+		}, skip: []bool{false, false, false}, wantErrors: map[int]error{2: fmt.Errorf("text too long for vectorization from provider: got 43, total limit: 20, remaining: 10")}},
+		{name: "skip last item", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "1. test object"}}, // set limit
+			{Class: "Car", Properties: map[string]interface{}{"test": "1. obj 1. batch"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "2. obj 1. batch"}},
+		}, skip: []bool{false, false, true}},
+		{name: "deadline", deadline: 200 * time.Millisecond, objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "tokens 40"}}, // set limit so next two items are in a batch
+			{Class: "Car", Properties: map[string]interface{}{"test": "wait 400"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "long long long long"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "next batch, will be aborted due to context deadline"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "skipped"}},
+			{Class: "Car", Properties: map[string]interface{}{"test": "has error again"}},
+		}, skip: []bool{false, false, false, false, true, false}, wantErrors: map[int]error{3: fmt.Errorf("context deadline exceeded"), 5: fmt.Errorf("context deadline exceeded")}},
+		{name: "request error", objects: []*models.Object{
+			{Class: "Car", Properties: map[string]interface{}{"test": "ReqError something"}},
+		}, skip: []bool{false}, wantErrors: map[int]error{0: fmt.Errorf("something")}},
+	}
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			client := &fakeBatchClientWithRL[[]float32]{} // has state
+
+			v := NewBatchVectorizer[[]float32](client, 1*time.Second,
+				Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, ReturnsRateLimit: true, HasTokenLimit: true},
+				logger, "test") // avoid waiting for rate limit
+			deadline := time.Now().Add(10 * time.Second)
+			if tt.deadline != 0 {
+				deadline = time.Now().Add(tt.deadline)
+			}
+
+			texts, tokenCounts := generateTokens(tt.objects)
+
+			// NOTE(review): cancl is invoked at the end instead of deferred, so a
+			// require failure above it leaks the context — consider defer cancl().
+			ctx, cancl := context.WithDeadline(context.Background(), deadline)
+			vecs, errs := v.SubmitBatchAndWait(ctx, cfg, tt.skip, tokenCounts, texts)
+
+			require.Len(t, errs, len(tt.wantErrors))
+			require.Len(t, vecs, len(tt.objects))
+
+			for i := range tt.objects {
+				if tt.wantErrors[i] != nil {
+					require.Equal(t, tt.wantErrors[i], errs[i])
+				} else if tt.skip[i] {
+					require.Nil(t, vecs[i])
+				} else {
+					require.NotNil(t, vecs[i])
+				}
+			}
+			cancl()
+		})
+	}
+}
+
+// TestBatchNoRLreturn covers clients whose Vectorize never returns rate-limit
+// information (second return value is nil): the vectorizer must fall back to
+// GetVectorizerRateLimit defaults, and in particular must not deadlock when
+// the reset rate is 0 and the token limit is smaller than the input size.
+func TestBatchNoRLreturn(t *testing.T) {
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	logger, _ := test.NewNullLogger()
+	cases := []struct {
+		name       string
+		objects    []*models.Object
+		skip       []bool
+		wantErrors map[int]error
+		deadline   time.Duration
+		resetRate  int // seconds until the fake's reported limits reset
+		tokenLimit int // TPM limit reported by the fake client
+	}{
+		{name: "low reset time - dont deadlock", objects: []*models.Object{{Class: "Car", Properties: map[string]interface{}{"test": "more tokens than TL"}}}, skip: []bool{false}, resetRate: 0, tokenLimit: 1},
+	}
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			client := &fakeBatchClientWithoutRL[[]float32]{defaultResetRate: tt.resetRate, defaultTPM: tt.tokenLimit} // has state
+
+			v := NewBatchVectorizer(client, 1*time.Second,
+				Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10},
+				logger, "test") // avoid waiting for rate limit
+			deadline := time.Now().Add(10 * time.Second)
+			if tt.deadline != 0 {
+				deadline = time.Now().Add(tt.deadline)
+			}
+
+			texts, tokenCounts := generateTokens(tt.objects)
+
+			ctx, cancl := context.WithDeadline(context.Background(), deadline)
+			vecs, errs := v.SubmitBatchAndWait(ctx, cfg, tt.skip, tokenCounts, texts)
+
+			require.Len(t, errs, len(tt.wantErrors))
+			require.Len(t, vecs, len(tt.objects))
+
+			for i := range tt.objects {
+				if tt.wantErrors[i] != nil {
+					require.Equal(t, tt.wantErrors[i], errs[i])
+				} else if tt.skip[i] {
+					require.Nil(t, vecs[i])
+				} else {
+					require.NotNil(t, vecs[i])
+				}
+			}
+			cancl()
+		})
+	}
+}
+
+// TestBatchMultiple submits three single-object batches from separate
+// goroutines, staggered by 100ms, and asserts they finish in submission
+// order — i.e. the vectorizer's internal queue is FIFO.
+func TestBatchMultiple(t *testing.T) {
+	client := &fakeBatchClientWithRL[[]float32]{}
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	logger, _ := test.NewNullLogger()
+
+	v := NewBatchVectorizer[[]float32](client, 40*time.Second, Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, HasTokenLimit: true, ReturnsRateLimit: true}, logger, "test") // avoid waiting for rate limit
+	res := make(chan int, 3)
+	wg := sync.WaitGroup{}
+	wg.Add(3)
+
+	// send multiple batches to the vectorizer and check if they are processed in the correct order. Note that the
+	// ObjectBatch function is doing some work before the objects are sent to vectorization, so we need to leave some
+	// time to account for that
+	for i := 0; i < 3; i++ {
+		i := i // capture loop variable (pre-Go-1.22 semantics)
+		go func() {
+			texts, tokenCounts := generateTokens([]*models.Object{
+				{Class: "Car", Properties: map[string]interface{}{"test": "wait 100"}},
+			})
+
+			vecs, errs := v.SubmitBatchAndWait(context.Background(), cfg, []bool{false}, tokenCounts, texts)
+			require.Len(t, vecs, 1)
+			require.Len(t, errs, 0)
+			res <- i
+			wg.Done()
+		}()
+
+		time.Sleep(100 * time.Millisecond) // the fake client sleeps 100ms per object ("wait 100"), so staggering submissions by 100ms keeps the enqueue order deterministic
+	}
+
+	wg.Wait()
+	close(res)
+	// check that the batches were processed in the correct order
+	for i := 0; i < 3; i++ {
+		require.Equal(t, i, <-res)
+	}
+}
+
+// TestBatchTimeouts verifies the per-batch time budget: with a 100ms budget
+// the vectorizer gives up waiting for the token-rate refresh and reports an
+// error for the over-long item; with a 1s budget it waits out the refresh
+// (the fake resets after 1s) and succeeds.
+func TestBatchTimeouts(t *testing.T) {
+	client := &fakeBatchClientWithRL[[]float32]{defaultResetRate: 1}
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	logger, _ := test.NewNullLogger()
+
+	objs := []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "tokens 18"}}, // first request, set rate down so the next two items can be sent
+		{Class: "Car", Properties: map[string]interface{}{"test": "wait 200"}},  // second batch, use up batch time to trigger waiting for refresh
+		{Class: "Car", Properties: map[string]interface{}{"test": "tokens 20"}}, // set next rate so the next object is too long. Depending on the total batch time it either sleeps or not
+		{Class: "Car", Properties: map[string]interface{}{"test": "next batch long long long long long"}},
+	}
+	skip := []bool{false, false, false, false}
+
+	cases := []struct {
+		batchTime      time.Duration
+		expectedErrors int
+	}{
+		{batchTime: 100 * time.Millisecond, expectedErrors: 1},
+		{batchTime: 1 * time.Second, expectedErrors: 0},
+	}
+	for _, tt := range cases {
+		t.Run(fmt.Sprint("BatchTimeouts", tt.batchTime), func(t *testing.T) {
+			v := NewBatchVectorizer[[]float32](client, tt.batchTime, Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, HasTokenLimit: true, ReturnsRateLimit: true}, logger, "test") // avoid waiting for rate limit
+
+			texts, tokenCounts := generateTokens(objs)
+
+			_, errs := v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+
+			require.Len(t, errs, tt.expectedErrors)
+		})
+	}
+}
+
+// TestBatchRequestLimit verifies behaviour when the provider's remaining
+// request budget drops to 0 ("requests 0" magic string): with a 100ms batch
+// time budget the second item fails, while a 2s budget outlasts the fake's
+// 1s reset rate, so the vectorizer waits for the refresh and succeeds.
+func TestBatchRequestLimit(t *testing.T) {
+	client := &fakeBatchClientWithRL[[]float32]{defaultResetRate: 1}
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	longString := "ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab"
+	logger, _ := test.NewNullLogger()
+
+	objs := []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "requests 0"}},              // wait for the rate limit to reset
+		{Class: "Car", Properties: map[string]interface{}{"test": "requests 0" + longString}}, // fill up default limit of 100 tokens
+	}
+	// one skip entry per object (previously declared with four entries for two objects)
+	skip := []bool{false, false}
+	texts, tokenCounts := generateTokens(objs)
+
+	cases := []struct {
+		batchTime      time.Duration
+		expectedErrors int
+	}{
+		{batchTime: 100 * time.Millisecond, expectedErrors: 1},
+		{batchTime: 2 * time.Second, expectedErrors: 0},
+	}
+	for _, tt := range cases {
+		t.Run(fmt.Sprint("Test request limit with", tt.batchTime), func(t *testing.T) {
+			v := NewBatchVectorizer[[]float32](client, tt.batchTime, Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, HasTokenLimit: true, ReturnsRateLimit: true}, logger, "test") // avoid waiting for rate limit
+
+			_, errs := v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+			require.Len(t, errs, tt.expectedErrors)
+		})
+	}
+}
+
+// TestBatchTokenLimitZero ensures a reported token limit of 0 does not make
+// the batching loop hang: both objects must be processed without errors and
+// the test must terminate.
+func TestBatchTokenLimitZero(t *testing.T) {
+	client := &fakeBatchClientWithRL[[]float32]{
+		defaultResetRate: 1,
+		defaultRPM:       500,
+		// token limits are all 0
+		rateLimit: &modulecomponents.RateLimits{RemainingRequests: 100, LimitRequests: 100},
+	}
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	longString := "ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab ab"
+	logger, _ := test.NewNullLogger()
+
+	objs := []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "tokens 0"}},
+		{Class: "Car", Properties: map[string]interface{}{"test": "tokens 0" + longString}},
+	}
+	skip := []bool{false, false}
+	texts, tokenCounts := generateTokens(objs)
+
+	v := NewBatchVectorizer(client, time.Second, Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, HasTokenLimit: true, ReturnsRateLimit: true}, logger, "test") // avoid waiting for rate limit
+
+	_, errs := v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+	require.Len(t, errs, 0)
+	// finishes without hanging
+}
+
+// generateTokens extracts the "test" property of every object as its
+// vectorization text and derives a per-object token count. The count is the
+// string's byte length, which matches how the fake clients in this package
+// "count" tokens.
+func generateTokens(objects []*models.Object) ([]string, []int) {
+	texts := make([]string, len(objects))
+	tokenCounts := make([]int, len(objects))
+	for i, obj := range objects {
+		var text string
+		if props, ok := obj.Properties.(map[string]interface{}); ok {
+			if v, found := props["test"]; found {
+				text = v.(string)
+			}
+		}
+		texts[i] = text
+		tokenCounts[i] = len(text)
+	}
+	return texts, tokenCounts
+}
+
+// TestBatchRequestMissingRLValues checks that a rate-limit update carrying
+// negative ("missing") remaining values is ignored rather than trusted:
+// batches submitted after such an update must proceed without waiting for a
+// refresh, which is asserted via the 900ms wall-clock bound at the end
+// (the fake's refresh rate is 1s).
+func TestBatchRequestMissingRLValues(t *testing.T) {
+	client := &fakeBatchClientWithRL[[]float32]{defaultResetRate: 1}
+	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}
+	logger, _ := test.NewNullLogger()
+
+	v := NewBatchVectorizer[[]float32](client, time.Second, Settings{MaxObjectsPerBatch: 2000, MaxTokensPerBatch: maxTokensPerBatch, MaxTimePerBatch: 10, HasTokenLimit: true, ReturnsRateLimit: true}, logger, "test") // avoid waiting for rate limit
+	skip := []bool{false}
+
+	start := time.Now()
+	// normal batch
+	objs := []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "text"}},
+	}
+	texts, tokenCounts := generateTokens(objs)
+
+	_, errs := v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+	require.Len(t, errs, 0)
+
+	// now batch with missing values, this should not cause any waiting or failures
+	objs = []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "missingValues "}}, // first request, set rate down so the next two items can be sent
+	}
+	texts, tokenCounts = generateTokens(objs)
+
+	_, errs = v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+	require.Len(t, errs, 0)
+
+	// normal batch that is unaffected by the change
+	objs = []*models.Object{
+		{Class: "Car", Properties: map[string]interface{}{"test": "text"}},
+	}
+	texts, tokenCounts = generateTokens(objs)
+	_, errs = v.SubmitBatchAndWait(context.Background(), cfg, skip, tokenCounts, texts)
+	require.Len(t, errs, 0)
+	// refresh rate is 1s. If the missing values would have any effect the batch algo would wait for the refresh to happen
+	require.Less(t, time.Since(start), time.Millisecond*900)
+}
+
+func TestEncoderCache(t *testing.T) {
+ cache := NewEncoderCache()
+
+ modelString := "text-embedding-ada-002"
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ wg.Add(2)
+ go func() {
+ tke, err := tiktoken.EncodingForModel(modelString)
+ require.NoError(t, err)
+ cache.Set(modelString, tke)
+ wg.Done()
+ }()
+
+ go func() {
+ cache.Get(modelString)
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/fakes_for_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/fakes_for_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..631500ea76b68d344f112625a3a62f5d57f790b1
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/fakes_for_test.go
@@ -0,0 +1,222 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package batch
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/modulecomponents"
+)
+
+// fakeBatchClientWithRL is a test double that reports rate-limit state with
+// every Vectorize response and interprets magic command strings embedded in
+// the input texts (see Vectorize) to mutate that state.
+type fakeBatchClientWithRL[T []float32] struct {
+	defaultResetRate int                          // seconds until the budgets reset; 0 is replaced by 60 on first use
+	defaultRPM       int                          // request limit reported by GetVectorizerRateLimit
+	defaultTPM       int                          // token limit reported by GetVectorizerRateLimit
+	rateLimit        *modulecomponents.RateLimits // mutable shared state, guarded by the embedded mutex
+	sync.Mutex
+}
+
+// Vectorize is the fake provider call. It returns a fixed 4-dimensional
+// vector per input and parses magic command strings from the texts to drive
+// rate-limit behaviour in tests:
+//
+//	"error <msg>"    -> per-item error <msg> (no vector for that item)
+//	"tokens <n>"     -> RemainingTokens=n, LimitTokens=2n
+//	"requests <n>"   -> RemainingRequests=n, LimitRequests=2n
+//	"ReqError <msg>" -> request-level error <msg>
+//	"wait <ms>"      -> sleep <ms> milliseconds before answering
+//	"missingValues " -> next response reports -1 remaining values
+//
+// Any other text refreshes RemainingTokens proportionally to the time since
+// the last call and errors if the text exceeds the remaining token budget.
+// The whole call is mutex-guarded because batches may arrive concurrently.
+func (c *fakeBatchClientWithRL[T]) Vectorize(ctx context.Context,
+	text []string, cfg moduletools.ClassConfig,
+) (*modulecomponents.VectorizationResult[T], *modulecomponents.RateLimits, int, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	if c.defaultResetRate == 0 {
+		c.defaultResetRate = 60
+	}
+
+	var reqError error
+
+	vectors := make([]T, len(text))
+	errors := make([]error, len(text))
+	// lazily initialize the simulated rate-limit state on first call
+	if c.rateLimit == nil {
+		c.rateLimit = &modulecomponents.RateLimits{LastOverwrite: time.Now(), RemainingTokens: 100, RemainingRequests: 100, LimitTokens: 200, ResetTokens: time.Now().Add(time.Duration(c.defaultResetRate) * time.Second), ResetRequests: time.Now().Add(time.Duration(c.defaultResetRate) * time.Second)}
+	} else if c.rateLimit.UpdateWithMissingValues {
+		// return original values
+		c.rateLimit.UpdateWithMissingValues = false
+		c.rateLimit.RemainingTokens = 100
+		c.rateLimit.RemainingRequests = 100
+	} else {
+		c.rateLimit.ResetTokens = time.Now().Add(time.Duration(c.defaultResetRate) * time.Second)
+	}
+
+	for i := range text {
+		// "error <msg>": per-item failure, skip the vector assignment
+		if len(text[i]) >= len("error ") && text[i][:6] == "error " {
+			errors[i] = fmt.Errorf("%s", text[i][6:])
+			continue
+		}
+
+		tok := len("tokens ")
+		if len(text[i]) >= tok && text[i][:tok] == "tokens " {
+			rate, _ := strconv.Atoi(text[i][tok:])
+			c.rateLimit.RemainingTokens = rate
+			c.rateLimit.LimitTokens = 2 * rate
+		} else if req := len("requests "); len(text[i]) >= req && text[i][:req] == "requests " {
+			reqs, _ := strconv.Atoi(strings.Split(text[i][req:], " ")[0])
+			c.rateLimit.RemainingRequests = reqs
+			c.rateLimit.LimitRequests = 2 * reqs
+		} else if reqErr := len("ReqError "); len(text[i]) >= reqErr && text[i][:reqErr] == "ReqError " {
+			reqError = fmt.Errorf("%v", strings.Split(text[i][reqErr:], " ")[0])
+		} else if len(text[i]) >= len("wait ") && text[i][:5] == "wait " {
+			wait, _ := strconv.Atoi(text[i][5:])
+			time.Sleep(time.Duration(wait) * time.Millisecond)
+		} else if len(text[i]) >= len("missingValues ") && text[i][:14] == "missingValues " {
+			c.rateLimit.UpdateWithMissingValues = true
+			c.rateLimit.RemainingTokens = -1
+			c.rateLimit.RemainingRequests = -1
+		} else {
+			// refresh the remaining token
+			secondsSinceLastRefresh := time.Since(c.rateLimit.LastOverwrite)
+			fraction := secondsSinceLastRefresh.Seconds() / time.Until(c.rateLimit.ResetTokens).Seconds()
+			if fraction > 1 {
+				c.rateLimit.RemainingTokens = c.rateLimit.LimitTokens
+			} else {
+				c.rateLimit.RemainingTokens += int(float64(c.rateLimit.LimitTokens) * fraction / float64(c.defaultResetRate))
+			}
+			if len(text[i]) > c.rateLimit.LimitTokens || len(text[i]) > c.rateLimit.RemainingTokens {
+				errors[i] = fmt.Errorf("text too long for vectorization from provider: got %v, total limit: %v, remaining: %v", len(text[i]), c.rateLimit.LimitTokens, c.rateLimit.RemainingTokens)
+			}
+
+		}
+		vectors[i] = []float32{0, 1, 2, 3}
+	}
+	c.rateLimit.LastOverwrite = time.Now()
+	// return a copy of the rate-limit state so callers cannot mutate the
+	// fake's internal record
+	return &modulecomponents.VectorizationResult[T]{
+		Vector:     vectors,
+		Dimensions: 4,
+		Text:       text,
+		Errors:     errors,
+	}, &modulecomponents.RateLimits{
+		LastOverwrite:           c.rateLimit.LastOverwrite,
+		RemainingTokens:         c.rateLimit.RemainingTokens,
+		RemainingRequests:       c.rateLimit.RemainingRequests,
+		LimitTokens:             c.rateLimit.LimitTokens,
+		ResetTokens:             c.rateLimit.ResetTokens,
+		ResetRequests:           c.rateLimit.ResetRequests,
+		LimitRequests:           c.rateLimit.LimitRequests,
+		UpdateWithMissingValues: c.rateLimit.UpdateWithMissingValues,
+	}, 0, reqError
+}
+
+// GetVectorizerRateLimit reports the fake's configured default limits with
+// both reset timestamps defaultResetRate seconds in the future.
+func (c *fakeBatchClientWithRL[T]) GetVectorizerRateLimit(ctx context.Context, cfg moduletools.ClassConfig) *modulecomponents.RateLimits {
+	reset := time.Now().Add(time.Duration(c.defaultResetRate) * time.Second)
+	return &modulecomponents.RateLimits{
+		RemainingTokens:   c.defaultTPM,
+		RemainingRequests: c.defaultRPM,
+		LimitTokens:       c.defaultTPM,
+		LimitRequests:     c.defaultRPM,
+		ResetTokens:       reset,
+		ResetRequests:     reset,
+	}
+}
+
+func (c *fakeBatchClientWithRL[T]) GetApiKeyHash(ctx context.Context, cfg moduletools.ClassConfig) [32]byte {
+ return [32]byte{}
+}
+
+// fakeBatchClientWithoutRL is a test double that never returns rate-limit
+// information from Vectorize (the second return value is always nil), so the
+// vectorizer must rely on GetVectorizerRateLimit defaults.
+type fakeBatchClientWithoutRL[T []float32] struct {
+	defaultResetRate int // seconds until the reported limits reset; 0 is replaced by 60 on first use
+	defaultRPM       int // request limit reported by GetVectorizerRateLimit
+	defaultTPM       int // token limit reported by GetVectorizerRateLimit
+}
+
+// Vectorize returns a fixed 4-dimensional vector per input and interprets a
+// few magic prefixes: "error <msg>" yields a per-item error, "ReqError <msg>"
+// a request-level error (first word only), and "wait <ms>" sleeps before
+// answering. It never reports rate-limit state (second return value is nil).
+func (c *fakeBatchClientWithoutRL[T]) Vectorize(ctx context.Context,
+	text []string, cfg moduletools.ClassConfig,
+) (*modulecomponents.VectorizationResult[T], *modulecomponents.RateLimits, int, error) {
+	if c.defaultResetRate == 0 {
+		c.defaultResetRate = 60
+	}
+
+	var reqError error
+	vectors := make([]T, len(text))
+	errors := make([]error, len(text))
+	for i, txt := range text {
+		if strings.HasPrefix(txt, "error ") {
+			errors[i] = fmt.Errorf("%s", strings.TrimPrefix(txt, "error "))
+			continue
+		}
+
+		switch {
+		case strings.HasPrefix(txt, "ReqError "):
+			reqError = fmt.Errorf("%v", strings.Split(strings.TrimPrefix(txt, "ReqError "), " ")[0])
+		case strings.HasPrefix(txt, "wait "):
+			wait, _ := strconv.Atoi(strings.TrimPrefix(txt, "wait "))
+			time.Sleep(time.Duration(wait) * time.Millisecond)
+		}
+		vectors[i] = []float32{0, 1, 2, 3}
+	}
+	return &modulecomponents.VectorizationResult[T]{
+		Vector:     vectors,
+		Dimensions: 4,
+		Text:       text,
+		Errors:     errors,
+	}, nil, 0, reqError
+}
+
+// GetVectorizerRateLimit reports the fake's configured default limits with
+// both reset timestamps defaultResetRate seconds in the future.
+func (c *fakeBatchClientWithoutRL[T]) GetVectorizerRateLimit(ctx context.Context, cfg moduletools.ClassConfig) *modulecomponents.RateLimits {
+	reset := time.Now().Add(time.Duration(c.defaultResetRate) * time.Second)
+	return &modulecomponents.RateLimits{
+		RemainingTokens:   c.defaultTPM,
+		RemainingRequests: c.defaultRPM,
+		LimitTokens:       c.defaultTPM,
+		LimitRequests:     c.defaultRPM,
+		ResetTokens:       reset,
+		ResetRequests:     reset,
+	}
+}
+
+func (c *fakeBatchClientWithoutRL[T]) GetApiKeyHash(ctx context.Context, cfg moduletools.ClassConfig) [32]byte {
+ return [32]byte{}
+}
+
+// fakeClassConfig is a minimal class-config stub for the batch tests. Only
+// Class/ClassByModuleName/Property carry behaviour; the remaining methods
+// return zero values.
+type fakeClassConfig struct {
+	classConfig           map[string]interface{}
+	vectorizePropertyName bool
+	skippedProperty       string
+	excludedProperty      string
+}
+
+// Class returns the configured class-level settings map.
+func (f fakeClassConfig) Class() map[string]interface{} {
+	return f.classConfig
+}
+
+// ClassByModuleName ignores the module name and returns the same map as Class.
+func (f fakeClassConfig) ClassByModuleName(moduleName string) map[string]interface{} {
+	return f.classConfig
+}
+
+// Property returns per-property settings: the skipped property is marked
+// "skip", the excluded property disables name vectorization, and otherwise
+// the struct-level vectorizePropertyName flag applies.
+func (f fakeClassConfig) Property(propName string) map[string]interface{} {
+	switch {
+	case propName == f.skippedProperty:
+		return map[string]interface{}{"skip": true}
+	case propName == f.excludedProperty:
+		return map[string]interface{}{"vectorizePropertyName": false}
+	case f.vectorizePropertyName:
+		return map[string]interface{}{"vectorizePropertyName": true}
+	default:
+		return nil
+	}
+}
+
+// Tenant returns an empty tenant name.
+func (f fakeClassConfig) Tenant() string {
+	return ""
+}
+
+// TargetVector returns an empty target-vector name.
+func (f fakeClassConfig) TargetVector() string {
+	return ""
+}
+
+// PropertiesDataTypes reports no property data types.
+func (f fakeClassConfig) PropertiesDataTypes() map[string]schema.DataType {
+	return nil
+}
+
+// Config returns no global config.
+func (f fakeClassConfig) Config() *config.Config {
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/tokenization.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/tokenization.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b28ab68aa09cc72514f94f0b81c657fc5a7e104
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/batch/tokenization.go
@@ -0,0 +1,97 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package batch
+
+import (
+ "context"
+ "sync"
+
+ "github.com/weaviate/weaviate/usecases/modulecomponents/settings"
+
+ "github.com/weaviate/weaviate/entities/moduletools"
+
+ "github.com/weaviate/tiktoken-go"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/modules/text2vec-openai/clients"
+ objectsvectorizer "github.com/weaviate/weaviate/usecases/modulecomponents/vectorizer"
+)
+
+// EncoderCache is a concurrency-safe cache of tiktoken encoders keyed by
+// model name. Building an encoder is expensive, so each model's encoder is
+// created once and shared.
+type EncoderCache struct {
+	lock  sync.RWMutex
+	cache map[string]*tiktoken.Tiktoken
+}
+
+// NewEncoderCache returns an empty, ready-to-use cache.
+func NewEncoderCache() *EncoderCache {
+	return &EncoderCache{cache: map[string]*tiktoken.Tiktoken{}}
+}
+
+// Get returns the cached encoder for model and whether one was present.
+func (e *EncoderCache) Get(model string) (*tiktoken.Tiktoken, bool) {
+	e.lock.RLock()
+	defer e.lock.RUnlock()
+	tk, ok := e.cache[model]
+	return tk, ok
+}
+
+// Set stores the encoder for model, replacing any previous entry.
+func (e *EncoderCache) Set(model string, tk *tiktoken.Tiktoken) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.cache[model] = tk
+}
+
+// TokenizerFuncType computes, per object, the text to vectorize and its
+// (scaled) token count, and reports whether every object was skipped. The
+// error return is part of the signature for other implementations; the one
+// built below always returns nil.
+type TokenizerFuncType func(ctx context.Context, objects []*models.Object, skipObject []bool, cfg moduletools.ClassConfig, objectVectorizer *objectsvectorizer.ObjectVectorizer, encoderCache *EncoderCache) ([]string, []int, bool, error)
+
+// ReturnBatchTokenizer builds a TokenizerFuncType for moduleName. multiplier
+// scales the tiktoken-derived token counts; a multiplier of 0 disables token
+// counting entirely (no encoder is created and all counts remain 0).
+// lowerCaseInput is forwarded to the class-settings helper.
+func ReturnBatchTokenizer(multiplier float32, moduleName string, lowerCaseInput bool) TokenizerFuncType {
+	return func(ctx context.Context, objects []*models.Object, skipObject []bool, cfg moduletools.ClassConfig, objectVectorizer *objectsvectorizer.ObjectVectorizer, encoderCache *EncoderCache) ([]string, []int, bool, error) {
+		texts := make([]string, len(objects))
+		tokenCounts := make([]int, len(objects))
+		var tke *tiktoken.Tiktoken
+		icheck := settings.NewBaseClassSettings(cfg, lowerCaseInput)
+		modelString := modelToModelString(icheck.Model(), moduleName)
+		if multiplier > 0 {
+			var err error
+			// creating the tokenizer is quite expensive => cache for each module
+			// NOTE(review): two concurrent callers may both build and Set the
+			// encoder for the same model; the duplicate work looks benign but
+			// confirm tiktoken encoders are safe to swap this way.
+			if tke2, ok := encoderCache.Get(modelString); ok {
+				tke = tke2
+			} else {
+				tke, err = tiktoken.EncodingForModel(modelString)
+				if err != nil {
+					// unknown model: fall back to the ada encoding instead of failing
+					tke, _ = tiktoken.EncodingForModel("text-embedding-ada-002")
+				}
+				encoderCache.Set(modelString, tke)
+			}
+		}
+
+		// prepare input for vectorizer, and send it to the queue. Prepare here to avoid work in the queue-worker
+		skipAll := true
+		for i := range texts {
+			if skipObject[i] {
+				continue
+			}
+			skipAll = false
+			text := objectVectorizer.Texts(ctx, objects[i], icheck)
+			texts[i] = text
+			if multiplier > 0 {
+				tokenCounts[i] = int(float32(clients.GetTokensCount(modelString, text, tke)) * multiplier)
+			}
+		}
+		return texts, tokenCounts, skipAll, nil
+	}
+}
+
// modelToModelString maps a module-level model alias to the concrete model
// identifier understood by the tokenizer. Currently only OpenAI's "ada"
// alias is rewritten; every other model name passes through unchanged.
func modelToModelString(model, moduleName string) string {
	if moduleName == "text2vec-openai" && model == "ada" {
		return "text-embedding-ada-002"
	}
	return model
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/rank_result.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/rank_result.go
new file mode 100644
index 0000000000000000000000000000000000000000..eab342ab6ef726176e41c97f046a5692c7653c5d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/rank_result.go
@@ -0,0 +1,22 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package ent
+
+// RankResult holds the reranker output for a single query: one score per
+// candidate document.
+type RankResult struct {
+	Query          string
+	DocumentScores []DocumentScore
+}
+
+// DocumentScore pairs a document's text with its relevance score for the query.
+type DocumentScore struct {
+	Document string
+	Score    float64
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/usage.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/usage.go
new file mode 100644
index 0000000000000000000000000000000000000000..40d2be054432a169f0c5b4c324741dc91d576e47
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/ent/usage.go
@@ -0,0 +1,25 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package ent
+
// Usage mirrors a provider's token-usage accounting as returned in API
// responses; every field is optional on the wire.
type Usage struct {
	CompletionTokens int `json:"completion_tokens,omitempty"`
	PromptTokens     int `json:"prompt_tokens,omitempty"`
	TotalTokens      int `json:"total_tokens,omitempty"`
}

// GetTotalTokens returns usage.TotalTokens, or -1 when no usage was reported.
func GetTotalTokens(usage *Usage) int {
	if usage != nil {
		return usage.TotalTokens
	}
	return -1
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative.go
new file mode 100644
index 0000000000000000000000000000000000000000..265a9f10f7f19af6b507c09ac073d0a1351ad571
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative.go
@@ -0,0 +1,102 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generative
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+)
+
+var compile, _ = regexp.Compile(`{([\w\s]*?)}`)
+
+// Text returns the text properties when at least one is present, nil otherwise.
+func Text(properties *modulecapabilities.GenerateProperties) map[string]string {
+	if properties == nil || len(properties.Text) == 0 {
+		return nil
+	}
+	return properties.Text
+}
+
+// Texts collects the non-empty text property maps from properties, skipping
+// nil entries and entries without text.
+func Texts(properties []*modulecapabilities.GenerateProperties) []map[string]string {
+	out := make([]map[string]string, 0, len(properties))
+	for _, p := range properties {
+		if p == nil || len(p.Text) == 0 {
+			continue
+		}
+		out = append(out, p.Text)
+	}
+	return out
+}
+
+// Blobs collects the non-empty blob property maps from properties, skipping
+// nil entries and entries without blobs.
+func Blobs(properties []*modulecapabilities.GenerateProperties) []map[string]*string {
+	out := make([]map[string]*string, 0, len(properties))
+	for _, p := range properties {
+		if p == nil || len(p.Blob) == 0 {
+			continue
+		}
+		out = append(out, p.Blob)
+	}
+	return out
+}
+
+// ParseImageProperties parses the user-supplied base64 images in inputBase64Images
+// and the server-stored base64 images in storedBase64ImagesArray, selecting stored
+// images by the property names in inputImagePropertyNames.
+//
+// It returns a slice of pointers to base64 strings in order to optimise for memory
+// usage when dealing with large images. Stored images come first (one entry per
+// requested property per stored map — a missing property contributes a nil entry,
+// matching map-lookup semantics), followed by the user-supplied images.
+func ParseImageProperties(inputBase64Images []*string, inputImagePropertyNames []string, storedBase64ImagesArray []map[string]*string) []*string {
+	// Pre-size: one entry per requested property per stored map, plus the inputs.
+	// Ranging over an empty slice is a no-op, so no emptiness guard is needed.
+	images := make([]*string, 0, len(storedBase64ImagesArray)*len(inputImagePropertyNames)+len(inputBase64Images))
+	for _, storedBase64Images := range storedBase64ImagesArray {
+		for _, inputImagePropertyName := range inputImagePropertyNames {
+			images = append(images, storedBase64Images[inputImagePropertyName])
+		}
+	}
+	return append(images, inputBase64Images...)
+}
+
+// MakeTaskPrompt renders a "task" prompt: {prop} placeholders in task are
+// substituted with matching values from textProperties, and the JSON-encoded
+// properties are appended after the task so the model also sees the raw data.
+//
+// It returns an error only when the properties cannot be JSON-marshalled.
+func MakeTaskPrompt(textProperties []map[string]string, task string) (string, error) {
+	marshal, err := json.Marshal(textProperties)
+	if err != nil {
+		return "", errors.Wrap(err, "marshal text properties")
+	}
+	task = compile.ReplaceAllStringFunc(task, func(match string) string {
+		match = strings.Trim(match, "{}")
+		// First property map that defines the placeholder wins.
+		for _, textProperty := range textProperties {
+			if val, ok := textProperty[match]; ok {
+				return val
+			}
+		}
+		// Unknown placeholder: keep the (brace-stripped) token as-is.
+		return match
+	})
+	if len(marshal) > 0 {
+		return fmt.Sprintf("%s: %s", task, marshal), nil
+	}
+	// task is already a string; the previous fmt.Sprint(task) was a no-op.
+	return task, nil
+}
+
+// MakeSinglePrompt substitutes every {property} placeholder in prompt with the
+// corresponding value from textProperties. It errors when a referenced
+// property is missing or empty, since silently dropping it would corrupt the
+// prompt.
+func MakeSinglePrompt(textProperties map[string]string, prompt string) (string, error) {
+	// FindAllString yields the same matches as FindAll without the
+	// string -> []byte -> string round-trip of the previous version.
+	for _, originalProperty := range compile.FindAllString(prompt, -1) {
+		replacedProperty := strings.TrimSpace(compile.FindStringSubmatch(originalProperty)[1])
+		value := textProperties[replacedProperty]
+		if value == "" {
+			return "", errors.Errorf("Following property has empty value: '%v'. Make sure you spell the property name correctly, verify that the property exists and has a value", replacedProperty)
+		}
+		prompt = strings.ReplaceAll(prompt, originalProperty, value)
+	}
+	return prompt, nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..760ef47d1ee282d838cc66c5e31b2b3ce880f2cf
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generative/generative_test.go
@@ -0,0 +1,30 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generative
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Test_MakeTaskPrompt checks that the JSON-encoded properties are appended
+// after a placeholder-free task.
+func Test_MakeTaskPrompt(t *testing.T) {
+	prompt, err := MakeTaskPrompt([]map[string]string{{"title": "A Grand Day Out"}}, "Create a story based on the following properties")
+	// NoError reports the underlying error message on failure, unlike Nil.
+	require.NoError(t, err)
+	require.Equal(t, "Create a story based on the following properties: [{\"title\":\"A Grand Day Out\"}]", prompt)
+}
+
+// Test_MakeSinglePrompt checks in-place substitution of a {title} placeholder.
+func Test_MakeSinglePrompt(t *testing.T) {
+	prompt, err := MakeSinglePrompt(map[string]string{"title": "A Grand Day Out"}, "Create a story based on \"{title}\"")
+	// NoError reports the underlying error message on failure, unlike Nil.
+	require.NoError(t, err)
+	require.Equal(t, "Create a story based on \"A Grand Day Out\"", prompt)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generictypes/generic_types.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generictypes/generic_types.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f6d54eb3bfc8dd54c0d2ab912d3006bec20b327
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/generictypes/generic_types.go
@@ -0,0 +1,85 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package generictypes
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/weaviate/weaviate/entities/dto"
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/entities/moduletools"
+)
+
+type findVectorFn = func(ctx context.Context,
+ className string, id strfmt.UUID, tenant, targetVector string) ([]float32, string, error)
+
+type findMultiVectorFn = func(ctx context.Context,
+ className string, id strfmt.UUID, tenant, targetVector string) ([][]float32, string, error)
+
+// Helper method for creating modulecapabilities.FindVectorFn[[]float32]
+func FindVectorFn(findVectorFn findVectorFn) modulecapabilities.FindVectorFn[[]float32] {
+ return &findVector[[]float32]{findVectorFn}
+}
+
+func FindMultiVectorFn(findMultiVectorFn findMultiVectorFn) modulecapabilities.FindVectorFn[[][]float32] {
+ return &findVector[[][]float32]{findMultiVectorFn}
+}
+
+type multiFindVectorFn = func(ctx context.Context,
+ className string, id strfmt.UUID, tenant, targetVector string) ([][]float32, string, error)
+
+// Helper method for creating modulecapabilities.FindVectorFn[[][]float32]
+func MultiFindVectorFn(multiFindVectorFn multiFindVectorFn) modulecapabilities.FindVectorFn[[][]float32] {
+ return &findVector[[][]float32]{multiFindVectorFn}
+}
+
+func (f *findVector[T]) FindVector(ctx context.Context,
+ className string, id strfmt.UUID, tenant, targetVector string,
+) (T, string, error) {
+ return f.findVectorFn(ctx, className, id, tenant, targetVector)
+}
+
+type findVector[T dto.Embedding] struct {
+ findVectorFn func(ctx context.Context,
+ className string, id strfmt.UUID, tenant, targetVector string) (T, string, error)
+}
+
+type vectorForParamsFn = func(ctx context.Context, params interface{},
+ className string, findVectorFn modulecapabilities.FindVectorFn[[]float32], cfg moduletools.ClassConfig,
+) ([]float32, error)
+
+// Helper method for creating modulecapabilities.VectorForParams[[]float32]
+func VectorForParams(vectorForParamsFn vectorForParamsFn) modulecapabilities.VectorForParams[[]float32] {
+ return &vectorForParams[[]float32]{vectorForParamsFn}
+}
+
+type multiVectorForParamsFn = func(ctx context.Context, params interface{},
+ className string, findVectorFn modulecapabilities.FindVectorFn[[][]float32], cfg moduletools.ClassConfig,
+) ([][]float32, error)
+
+// Helper method for creating modulecapabilities.VectorForParams[[][]float32]
+func MultiVectorForParams(multiVectorForParamsFn multiVectorForParamsFn) modulecapabilities.VectorForParams[[][]float32] {
+ return &vectorForParams[[][]float32]{multiVectorForParamsFn}
+}
+
+type vectorForParams[T dto.Embedding] struct {
+ vectorForParams func(ctx context.Context, params interface{},
+ className string, findVectorFn modulecapabilities.FindVectorFn[T], cfg moduletools.ClassConfig,
+ ) (T, error)
+}
+
+func (v *vectorForParams[T]) VectorForParams(ctx context.Context, params interface{},
+ className string, findVectorFn modulecapabilities.FindVectorFn[T], cfg moduletools.ClassConfig,
+) (T, error) {
+ return v.vectorForParams(ctx, params, className, findVectorFn, cfg)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/gqlparser/gqlparser.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/gqlparser/gqlparser.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3bb223a88132f99f10e194cd03c1baf4d70eeaf
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/gqlparser/gqlparser.go
@@ -0,0 +1,103 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package gqlparser
+
+import (
+ "strconv"
+
+ "github.com/tailor-inc/graphql/language/ast"
+)
+
+// GetValueAsString returns the field's value when it is a string, nil otherwise.
+func GetValueAsString(f *ast.ObjectField) *string {
+	if s, ok := f.Value.GetValue().(string); ok {
+		return &s
+	}
+	return nil
+}
+
+// GetValueAsStringOrEmpty returns the field's string value, or "" when the
+// value is not a string.
+func GetValueAsStringOrEmpty(f *ast.ObjectField) string {
+	s := GetValueAsString(f)
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
+// GetValueAsFloat64 parses the field's value as a float64. It returns nil when
+// the value is not a string or does not parse as a float.
+func GetValueAsFloat64(f *ast.ObjectField) *float64 {
+	// comma-ok assertion: a non-string value must not panic, mirroring
+	// GetValueAsString above (the previous bare assertion panicked).
+	asString, ok := f.Value.GetValue().(string)
+	if !ok {
+		return nil
+	}
+	if asFloat64, err := strconv.ParseFloat(asString, 64); err == nil {
+		return &asFloat64
+	}
+	return nil
+}
+
+// GetValueAsInt64 parses the field's value as an int64. It returns nil when
+// the value is not a string or does not parse as a base-10 integer.
+func GetValueAsInt64(f *ast.ObjectField) *int64 {
+	// comma-ok assertion: a non-string value must not panic, mirroring
+	// GetValueAsString above (the previous bare assertion panicked).
+	asString, ok := f.Value.GetValue().(string)
+	if !ok {
+		return nil
+	}
+	if asInt64, err := strconv.ParseInt(asString, 10, 64); err == nil {
+		return &asInt64
+	}
+	return nil
+}
+
+// GetValueAsInt parses the field's value as an int. It returns nil when the
+// value is not a string or does not parse as a base-10 integer.
+func GetValueAsInt(f *ast.ObjectField) *int {
+	// comma-ok assertion: a non-string value must not panic, mirroring
+	// GetValueAsString above (the previous bare assertion panicked).
+	asString, ok := f.Value.GetValue().(string)
+	if !ok {
+		return nil
+	}
+	if asInt, err := strconv.Atoi(asString); err == nil {
+		return &asInt
+	}
+	return nil
+}
+
+// GetValueAsStringArray returns the field's value as a string slice: a single
+// string becomes a one-element slice, a list yields its string elements, and
+// any other value yields nil.
+func GetValueAsStringArray(f *ast.ObjectField) []string {
+	switch vals := f.Value.GetValue().(type) {
+	case string:
+		return []string{vals}
+	case []ast.Value:
+		var stopSequences []string
+		for _, val := range vals {
+			// comma-ok: skip non-string list elements instead of panicking
+			// on the bare assertion the previous version used.
+			if s, ok := val.GetValue().(string); ok {
+				stopSequences = append(stopSequences, s)
+			}
+		}
+		return stopSequences
+	default:
+		return nil
+	}
+}
+
+// GetValueAsStringPtrArray returns the field's value as a slice of string
+// pointers: a single string becomes a one-element slice, a list yields
+// pointers to its string elements, and any other value yields nil.
+func GetValueAsStringPtrArray(f *ast.ObjectField) []*string {
+	switch vals := f.Value.GetValue().(type) {
+	case string:
+		return []*string{&vals}
+	case []ast.Value:
+		var values []*string
+		for _, val := range vals {
+			// comma-ok: skip non-string list elements instead of panicking
+			// on the bare assertion the previous version used. `value` is a
+			// fresh variable per iteration, so taking its address is safe.
+			if value, ok := val.GetValue().(string); ok {
+				values = append(values, &value)
+			}
+		}
+		return values
+	default:
+		return nil
+	}
+}
+
+// GetValueAsBool returns the field's value when it is a bool, nil otherwise.
+func GetValueAsBool(f *ast.ObjectField) *bool {
+	if b, ok := f.Value.GetValue().(bool); ok {
+		return &b
+	}
+	return nil
+}
+
+// GetValueAsBoolOrFalse returns the field's bool value, defaulting to false
+// when the value is not a bool.
+func GetValueAsBoolOrFalse(f *ast.ObjectField) bool {
+	b := GetValueAsBool(f)
+	return b != nil && *b
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings.go
new file mode 100644
index 0000000000000000000000000000000000000000..b0d79d1e3c340f232bc5c1b8c2ec62ea51822ab9
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings.go
@@ -0,0 +1,320 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package settings
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+
+ "github.com/weaviate/weaviate/entities/moduletools"
+)
+
+const (
+	// DefaultPropertyIndexed: properties take part in vectorization unless
+	// explicitly skipped.
+	DefaultPropertyIndexed = true
+	// DefaultVectorizeClassName: the class name is part of the vectorizer
+	// input by default.
+	DefaultVectorizeClassName = true
+	// DefaultVectorizePropertyName: property names are excluded from the
+	// vectorizer input by default.
+	DefaultVectorizePropertyName = false
+)
+
+// errInvalidProperties is returned by ValidateIndexState when no property is
+// both of a vectorizable type and included in indexing.
+var errInvalidProperties = fmt.Errorf("invalid properties: didn't find a single property which is " +
+	"vectorizable and is not excluded from indexing. " +
+	"To fix this add a vectorizable property which is not excluded from indexing")
+
+// BaseClassSettings provides common accessors over a class's per-module
+// vectorizer configuration (source properties, model, vectorize flags),
+// shared across vectorizer modules.
+type BaseClassSettings struct {
+	cfg            moduletools.ClassConfig
+	propertyHelper *classPropertyValuesHelper
+	// lowerCaseInput is reported verbatim by LowerCaseInput().
+	lowerCaseInput bool
+	// modelParameterNames lists the settings keys probed, in order, by Model().
+	modelParameterNames []string
+}
+
+// NewBaseClassSettings builds settings backed by cfg; lowerCaseInput is
+// reported verbatim through LowerCaseInput().
+func NewBaseClassSettings(cfg moduletools.ClassConfig, lowerCaseInput bool) *BaseClassSettings {
+	return &BaseClassSettings{
+		cfg:            cfg,
+		propertyHelper: &classPropertyValuesHelper{},
+		lowerCaseInput: lowerCaseInput,
+	}
+}
+
+// NewBaseClassSettingsWithAltNames builds settings whose property helper is
+// scoped to moduleName (also recognising altNames) and whose Model() probes
+// the supplied custom model parameter names before the default "model".
+func NewBaseClassSettingsWithAltNames(cfg moduletools.ClassConfig,
+	lowerCaseInput bool, moduleName string, altNames []string, customModelParameterName []string,
+) *BaseClassSettings {
+	// Clone before appending: append(customModelParameterName, ...) could
+	// otherwise write "model" into the caller's backing array when it has
+	// spare capacity.
+	modelParameters := append(slices.Clone(customModelParameterName), "model")
+
+	return &BaseClassSettings{
+		cfg:                 cfg,
+		propertyHelper:      &classPropertyValuesHelper{moduleName: moduleName, altNames: altNames},
+		lowerCaseInput:      lowerCaseInput,
+		modelParameterNames: modelParameters,
+	}
+}
+
+// NewBaseClassSettingsWithCustomModel builds settings whose Model() probes
+// both "model" and the supplied custom parameter name.
+func NewBaseClassSettingsWithCustomModel(cfg moduletools.ClassConfig, lowerCaseInput bool, customModelParameterName string) *BaseClassSettings {
+	s := &BaseClassSettings{
+		cfg:            cfg,
+		propertyHelper: &classPropertyValuesHelper{},
+		lowerCaseInput: lowerCaseInput,
+	}
+	s.modelParameterNames = []string{"model", customModelParameterName}
+	return s
+}
+
+// LowerCaseInput returns the lowerCaseInput flag supplied at construction.
+func (s BaseClassSettings) LowerCaseInput() bool {
+	return s.lowerCaseInput
+}
+
+// PropertyIndexed reports whether propName participates in vectorization.
+// When an explicit source-property list is configured, only listed properties
+// are indexed; otherwise the property-level "skip" flag decides (skip == true
+// means not indexed), and a missing or non-bool flag falls back to
+// DefaultPropertyIndexed.
+func (s BaseClassSettings) PropertyIndexed(propName string) bool {
+	if s.cfg == nil {
+		return DefaultPropertyIndexed
+	}
+
+	if props := s.Properties(); len(props) > 0 {
+		return slices.Contains(props, propName)
+	}
+
+	// combined comma-ok handles both a missing key and a non-bool value
+	if skip, ok := s.cfg.Property(propName)["skip"].(bool); ok {
+		return !skip
+	}
+	return DefaultPropertyIndexed
+}
+
+// hasSourceProperties reports whether an explicit source-property list is
+// configured for this class.
+func (s BaseClassSettings) hasSourceProperties() bool {
+	if s.cfg == nil {
+		return false
+	}
+	return len(s.Properties()) > 0
+}
+
+// VectorizePropertyName reports whether propName itself should be part of the
+// vectorized text, per the property-level "vectorizePropertyName" flag; a
+// missing or non-bool flag falls back to DefaultVectorizePropertyName.
+func (s BaseClassSettings) VectorizePropertyName(propName string) bool {
+	if s.cfg == nil {
+		return DefaultVectorizePropertyName
+	}
+	if v, ok := s.cfg.Property(propName)["vectorizePropertyName"].(bool); ok {
+		return v
+	}
+	return DefaultVectorizePropertyName
+}
+
+// VectorizeClassName reports whether the class name is included in the text
+// to vectorize, per the class-level "vectorizeClassName" setting; a missing
+// or non-bool value falls back to DefaultVectorizeClassName.
+func (s BaseClassSettings) VectorizeClassName() bool {
+	if s.cfg == nil {
+		return DefaultVectorizeClassName
+	}
+	if v, ok := s.GetSettings()["vectorizeClassName"].(bool); ok {
+		return v
+	}
+	return DefaultVectorizeClassName
+}
+
+func (s BaseClassSettings) Properties() []string {
+ if s.cfg == nil || len(s.GetSettings()) == 0 {
+ return nil
+ }
+
+ field, ok := s.GetSettings()["properties"]
+ if !ok {
+ return nil
+ }
+
+ asArray, ok := field.([]any)
+ if ok {
+ asStringArray := make([]string, len(asArray))
+ for i := range asArray {
+ asStringArray[i] = asArray[i].(string)
+ }
+ return asStringArray
+ }
+
+ asStringArray, ok := field.([]string)
+ if ok {
+ return asStringArray
+ }
+
+ return nil
+}
+
+// Model returns the configured model name, probing each known model parameter
+// key in order. It returns "" when no model is configured.
+func (s BaseClassSettings) Model() string {
+	if s.cfg == nil || len(s.GetSettings()) == 0 {
+		return ""
+	}
+
+	for _, parameterName := range s.modelParameterNames {
+		// comma-ok assertion: a malformed (non-string) value in the
+		// user-supplied configuration must not panic; keep probing the
+		// remaining parameter names instead.
+		if model, ok := s.GetSettings()[parameterName].(string); ok {
+			return model
+		}
+	}
+
+	return ""
+}
+
+func (s BaseClassSettings) ValidateClassSettings() error {
+ if s.cfg != nil && len(s.GetSettings()) > 0 {
+ if field, ok := s.GetSettings()["properties"]; ok {
+ fieldsArray, fieldsArrayOk := field.([]any)
+ if fieldsArrayOk {
+ if len(fieldsArray) == 0 {
+ return errors.New("properties field needs to have at least 1 property defined")
+ }
+ for _, value := range fieldsArray {
+ _, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("properties field value: %v must be a string", value)
+ }
+ }
+ }
+ stringArray, stringArrayOk := field.([]string)
+ if stringArrayOk && len(stringArray) == 0 {
+ return errors.New("properties field needs to have at least 1 property defined")
+ }
+ if !fieldsArrayOk && !stringArrayOk {
+ return fmt.Errorf("properties field needs to be of array type, got: %T", field)
+ }
+ }
+ }
+ return nil
+}
+
+// isPropertyIndexed reports whether propName appears in the configured
+// source-property list.
+func (s BaseClassSettings) isPropertyIndexed(propName string) bool {
+	return slices.Contains(s.Properties(), propName)
+}
+
+// GetPropertyAsInt64 reads the class setting name as *int64 via the property
+// helper, falling back to defaultValue.
+func (s BaseClassSettings) GetPropertyAsInt64(name string, defaultValue *int64) *int64 {
+	return s.propertyHelper.GetPropertyAsInt64(s.cfg, name, defaultValue)
+}
+
+// GetPropertyAsString reads the class setting name as string via the property
+// helper, falling back to defaultValue.
+func (s BaseClassSettings) GetPropertyAsString(name, defaultValue string) string {
+	return s.propertyHelper.GetPropertyAsString(s.cfg, name, defaultValue)
+}
+
+// GetPropertyAsBool reads the class setting name as bool via the property
+// helper, falling back to defaultValue.
+func (s BaseClassSettings) GetPropertyAsBool(name string, defaultValue bool) bool {
+	return s.propertyHelper.GetPropertyAsBool(s.cfg, name, defaultValue)
+}
+
+// GetNumber converts an arbitrary value to float32 via the property helper.
+func (s BaseClassSettings) GetNumber(in interface{}) (float32, error) {
+	return s.propertyHelper.GetNumber(in)
+}
+
+// ValidateIndexState ensures a vector can always be built for this class:
+// either the class name is vectorized, or at least one property is both of a
+// vectorizable type and not excluded from indexing. It also rejects
+// properties declared without any datatype.
+func (s BaseClassSettings) ValidateIndexState(class *models.Class) error {
+	// Vectorizing the class name always yields input for the vectorizer,
+	// no need to inspect the properties.
+	if s.VectorizeClassName() {
+		return nil
+	}
+
+	// Search for at least one indexed property of a supported type; the
+	// first hit makes the schema valid.
+	for _, prop := range class.Properties {
+		if len(prop.DataType) == 0 {
+			return fmt.Errorf("property %s must have at least one datatype: "+
+				"got %v", prop.Name, prop.DataType)
+		}
+
+		if s.isPropertyDataTypeSupported(prop.DataType[0]) && s.PropertyIndexed(prop.Name) {
+			return nil
+		}
+	}
+
+	return errInvalidProperties
+}
+
+// GetSettings returns the raw per-class settings map for this module, or nil
+// when either the config or the property helper is missing.
+func (s BaseClassSettings) GetSettings() map[string]interface{} {
+	if s.cfg != nil && s.propertyHelper != nil {
+		return s.propertyHelper.GetSettings(s.cfg)
+	}
+	return nil
+}
+
+// Validate checks this class's module settings and, when auto-schema is
+// disabled, additionally verifies that a vector can be built from the class's
+// declared properties.
+func (s BaseClassSettings) Validate(class *models.Class) error {
+	// Cross-class requests (such as Explore{}) arrive without a config.
+	if s.cfg == nil {
+		return errors.New("empty config")
+	}
+
+	// Check the optional "properties" setting's shape, if defined.
+	if err := s.ValidateClassSettings(); err != nil {
+		return err
+	}
+
+	if s.isAutoSchemaEnabled() {
+		// Auto-schema may still create referenced properties later, so the
+		// index-state check is skipped.
+		return nil
+	}
+	return s.ValidateIndexState(class)
+}
+
+// isAutoSchemaEnabled reports whether the runtime auto-schema flag is set;
+// any missing link in the config chain counts as disabled.
+func (s BaseClassSettings) isAutoSchemaEnabled() bool {
+	if s.cfg == nil || s.cfg.Config() == nil || s.cfg.Config().AutoSchema.Enabled == nil {
+		return false
+	}
+	return s.cfg.Config().AutoSchema.Enabled.Get()
+}
+
+// isPropertyDataTypeSupported reports whether dt can feed the vectorizer.
+// Text-like types always qualify; when an explicit source-property list is
+// configured, object, numeric, date and boolean types (and their array forms)
+// qualify as well.
+func (s BaseClassSettings) isPropertyDataTypeSupported(dt string) bool {
+	switch schema.DataType(dt) {
+	case schema.DataTypeText, schema.DataTypeString, schema.DataTypeTextArray, schema.DataTypeStringArray:
+		return true
+	}
+	if !s.hasSourceProperties() {
+		return false
+	}
+	// additional types accepted only alongside explicit source properties
+	switch schema.DataType(dt) {
+	case schema.DataTypeObject, schema.DataTypeObjectArray,
+		schema.DataTypeInt, schema.DataTypeNumber, schema.DataTypeIntArray, schema.DataTypeNumberArray,
+		schema.DataTypeDate, schema.DataTypeDateArray,
+		schema.DataTypeBoolean, schema.DataTypeBooleanArray:
+		return true
+	}
+	return false
+}
+
+// ValidateSetting reports whether value is one of availableValues.
+func ValidateSetting[T string | int64](value T, availableValues []T) bool {
+	return slices.Contains(availableValues, value)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5fd7b18f1cb62ffb1f204a452ff4c9958ac4e9f2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/base_class_settings_test.go
@@ -0,0 +1,403 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package settings
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/config/runtime"
+ "github.com/weaviate/weaviate/usecases/modules"
+)
+
+// Test_BaseClassSettings verifies that an explicit source-property list takes
+// precedence over per-property flags: listed properties report as indexed
+// even with "skip": true, while unlisted ones do not.
+func Test_BaseClassSettings(t *testing.T) {
+	targetVector := "targetVector"
+	propertyToIndex := "someProp"
+	class := &models.Class{
+		Class: "MyClass",
+		VectorConfig: map[string]models.VectorConfig{
+			targetVector: {
+				Vectorizer: map[string]any{
+					"my-module": map[string]any{
+						"vectorizeClassName": false,
+						"properties":         []any{propertyToIndex},
+					},
+				},
+				VectorIndexType: "hnsw",
+			},
+		},
+		Properties: []*models.Property{
+			{
+				Name: propertyToIndex,
+				ModuleConfig: map[string]any{
+					"my-module": map[string]any{
+						// "skip" is expected to be overridden by the
+						// source-property list declared above
+						"skip":                  true,
+						"vectorizePropertyName": true,
+					},
+				},
+			},
+			{
+				Name: "otherProp",
+			},
+		},
+	}
+
+	cfg := modules.NewClassBasedModuleConfig(class, "my-module", "tenant", targetVector, nil)
+	ic := NewBaseClassSettings(cfg, false)
+
+	assert.True(t, ic.PropertyIndexed(propertyToIndex))
+	assert.True(t, ic.VectorizePropertyName(propertyToIndex))
+	assert.False(t, ic.PropertyIndexed("otherProp"))
+	assert.False(t, ic.VectorizePropertyName("otherProp"))
+	assert.False(t, ic.VectorizeClassName())
+}
+
+// Test_BaseClassSettings_ValidateClassSettings covers the accepted and
+// rejected shapes of the optional "properties" setting (absent, []any,
+// []string, nil, empty, non-array, non-string elements).
+func Test_BaseClassSettings_ValidateClassSettings(t *testing.T) {
+	targetVector := "targetVector"
+	// getClass builds a class whose vectorizer settings merge moduleSettings
+	// over a vectorizeClassName=false baseline.
+	getClass := func(moduleSettings map[string]any) *models.Class {
+		settings := map[string]any{
+			"vectorizeClassName": false,
+		}
+		for k, v := range moduleSettings {
+			settings[k] = v
+		}
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				targetVector: {
+					Vectorizer: map[string]any{
+						"my-module": settings,
+					},
+					VectorIndexType: "hnsw",
+				},
+			},
+			Properties: []*models.Property{
+				{
+					Name: "prop1",
+				},
+				{
+					Name: "otherProp",
+				},
+			},
+		}
+	}
+	tests := []struct {
+		name     string
+		settings map[string]any
+		wantErr  error
+	}{
+		{
+			name:     "without properties",
+			settings: nil,
+			wantErr:  nil,
+		},
+		{
+			name: "proper properties",
+			settings: map[string]any{
+				"properties": []any{"prop1"},
+			},
+			wantErr: nil,
+		},
+		{
+			name: "nil properties",
+			settings: map[string]any{
+				"properties": nil,
+			},
+			wantErr: errors.New("properties field needs to be of array type, got: "),
+		},
+		{
+			name: "at least 1 property",
+			settings: map[string]any{
+				"properties": []any{},
+			},
+			wantErr: errors.New("properties field needs to have at least 1 property defined"),
+		},
+		{
+			name: "at least 1 property with []string",
+			settings: map[string]any{
+				"properties": []string{},
+			},
+			wantErr: errors.New("properties field needs to have at least 1 property defined"),
+		},
+		{
+			name: "must be an array",
+			settings: map[string]any{
+				"properties": "string",
+			},
+			wantErr: errors.New("properties field needs to be of array type, got: string"),
+		},
+		{
+			name: "properties values need to be string",
+			settings: map[string]any{
+				"properties": []any{"string", 1},
+			},
+			wantErr: errors.New("properties field value: 1 must be a string"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			class := getClass(tt.settings)
+			cfg := modules.NewClassBasedModuleConfig(class, "my-module", "tenant", targetVector, nil)
+			s := NewBaseClassSettings(cfg, false)
+			err := s.ValidateClassSettings()
+			if tt.wantErr != nil {
+				require.Error(t, err)
+				assert.Equal(t, err.Error(), tt.wantErr.Error())
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+// Test_BaseClassSettings_Validate exercises Validate end-to-end: the
+// "properties" setting check plus, when auto-schema is disabled, the
+// vectorizable-property check against the class schema.
+func Test_BaseClassSettings_Validate(t *testing.T) {
+	// getDBConfig builds a runtime config with auto-schema on or off; a nil
+	// DynamicValue models the disabled state.
+	getDBConfig := func(autoSchemaEnabled bool) *config.Config {
+		var enabled *runtime.DynamicValue[bool]
+		if autoSchemaEnabled {
+			enabled = runtime.NewDynamicValue(autoSchemaEnabled)
+		}
+		cfg := config.Config{AutoSchema: config.AutoSchema{Enabled: enabled}}
+		return &cfg
+	}
+	// fixture: no schema properties at all
+	classNoProperties := func() *models.Class {
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				"targetVector": {
+					Vectorizer: map[string]interface{}{
+						"my-module": map[string]interface{}{
+							"vectorizeClassName": false,
+						},
+					},
+				},
+			},
+		}
+	}
+	// fixture: source properties reference a property that does not exist yet
+	classOnlySourceProperties := func() *models.Class {
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				"targetVector": {
+					Vectorizer: map[string]interface{}{
+						"my-module": map[string]interface{}{
+							"vectorizeClassName": false,
+							"properties":         []string{"property_should_be_created_autoschema"},
+						},
+					},
+				},
+			},
+		}
+	}
+	// fixture: source properties match a declared text property
+	classPropertiesAndSourceProperties := func() *models.Class {
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				"targetVector": {
+					Vectorizer: map[string]interface{}{
+						"my-module": map[string]interface{}{
+							"vectorizeClassName": false,
+							"properties":         []string{"predefined_property"},
+						},
+					},
+				},
+			},
+			Properties: []*models.Property{
+				{
+					Name:     "predefined_property",
+					DataType: []string{schema.DataTypeText.String()},
+				},
+				{
+					Name:     "otherProp",
+					DataType: []string{schema.DataTypeText.String()},
+				},
+			},
+		}
+	}
+	// fixture: source properties reference none of the declared properties
+	classPropertiesAndNonExistentSourceProperties := func() *models.Class {
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				"targetVector": {
+					Vectorizer: map[string]interface{}{
+						"my-module": map[string]interface{}{
+							"vectorizeClassName": false,
+							"properties":         []string{"nonexistent_property"},
+						},
+					},
+				},
+			},
+			Properties: []*models.Property{
+				{
+					Name:     "predefined_property",
+					DataType: []string{schema.DataTypeText.String()},
+				},
+				{
+					Name:     "otherProp",
+					DataType: []string{schema.DataTypeText.String()},
+				},
+			},
+		}
+	}
+	// fixture: every property type accepted when source properties are set
+	classAllVectorizablePropertiesAndSourceProperties := func() *models.Class {
+		return &models.Class{
+			Class: "MyClass",
+			VectorConfig: map[string]models.VectorConfig{
+				"targetVector": {
+					Vectorizer: map[string]interface{}{
+						"my-module": map[string]interface{}{
+							"vectorizeClassName": false,
+							"properties": []string{
+								"text_prop", "text_array_prop", "string_prop", "string_array_prop",
+								"object_prop", "object_array_prop",
+								"int_prop", "int_array_prop",
+								"number_prop", "number_array_prop",
+								"date_prop", "date_array_prop",
+								"boolean_prop", "boolean_array_prop",
+							},
+						},
+					},
+				},
+			},
+			Properties: []*models.Property{
+				{
+					Name:     "text_prop",
+					DataType: []string{schema.DataTypeText.String()},
+				},
+				{
+					Name:     "text_array_prop",
+					DataType: []string{schema.DataTypeTextArray.String()},
+				},
+				{
+					Name:     "string_prop",
+					DataType: []string{schema.DataTypeString.String()},
+				},
+				{
+					Name:     "string_array_prop",
+					DataType: []string{schema.DataTypeStringArray.String()},
+				},
+				{
+					Name:     "object_prop",
+					DataType: []string{schema.DataTypeObject.String()},
+				},
+				{
+					Name:     "object_array_prop",
+					DataType: []string{schema.DataTypeObjectArray.String()},
+				},
+				{
+					Name:     "int_prop",
+					DataType: []string{schema.DataTypeInt.String()},
+				},
+				{
+					Name:     "int_array_prop",
+					DataType: []string{schema.DataTypeIntArray.String()},
+				},
+				{
+					Name:     "number_prop",
+					DataType: []string{schema.DataTypeNumber.String()},
+				},
+				{
+					Name:     "number_array_prop",
+					DataType: []string{schema.DataTypeNumberArray.String()},
+				},
+				{
+					Name:     "date_prop",
+					DataType: []string{schema.DataTypeDate.String()},
+				},
+				{
+					Name:     "date_array_prop",
+					DataType: []string{schema.DataTypeDateArray.String()},
+				},
+				{
+					Name:     "boolean_prop",
+					DataType: []string{schema.DataTypeBoolean.String()},
+				},
+				{
+					Name:     "boolean_array_prop",
+					DataType: []string{schema.DataTypeBooleanArray.String()},
+				},
+			},
+		}
+	}
+	tests := []struct {
+		name              string
+		class             *models.Class
+		autoSchemaEnabled bool
+		wantErr           error
+	}{
+		{
+			name:              "class with no properties, auto schema enabled",
+			class:             classNoProperties(),
+			autoSchemaEnabled: true,
+		},
+		{
+			name:              "class with no properties, auto schema disabled",
+			class:             classNoProperties(),
+			autoSchemaEnabled: false,
+			wantErr:           errInvalidProperties,
+		},
+		{
+			name:              "class with only source properties, auto schema enabled",
+			class:             classOnlySourceProperties(),
+			autoSchemaEnabled: true,
+		},
+		{
+			name:              "class with only source properties, auto schema disabled",
+			class:             classOnlySourceProperties(),
+			autoSchemaEnabled: false,
+			wantErr:           errInvalidProperties,
+		},
+		{
+			name:              "class with properties and source properties, auto schema enabled",
+			class:             classPropertiesAndSourceProperties(),
+			autoSchemaEnabled: true,
+		},
+		{
+			name:              "class with properties and source properties, auto schema disabled",
+			class:             classPropertiesAndSourceProperties(),
+			autoSchemaEnabled: false,
+		},
+		{
+			name:              "class with properties and non existent source properties, auto schema enabled",
+			class:             classPropertiesAndNonExistentSourceProperties(),
+			autoSchemaEnabled: true,
+		},
+		{
+			name:              "class with properties and non existent source properties, auto schema disabled",
+			class:             classPropertiesAndNonExistentSourceProperties(),
+			autoSchemaEnabled: false,
+			wantErr:           errInvalidProperties,
+		},
+		{
+			name:              "class with all vectorizable property types",
+			class:             classAllVectorizablePropertiesAndSourceProperties(),
+			autoSchemaEnabled: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cfg := modules.NewClassBasedModuleConfig(tt.class, "my-module", "tenant", "targetVector", getDBConfig(tt.autoSchemaEnabled))
+			ic := NewBaseClassSettings(cfg, false)
+			err := ic.Validate(tt.class)
+			if tt.wantErr != nil {
+				require.Error(t, err)
+				assert.EqualError(t, err, tt.wantErr.Error())
+			} else {
+				assert.Nil(t, err)
+			}
+		})
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/class_settings_property_helper.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/class_settings_property_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba680b0c4458cd8d1d7b1683eca05f643b1469d2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/settings/class_settings_property_helper.go
@@ -0,0 +1,229 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package settings
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/weaviate/weaviate/entities/moduletools"
+)
+
// PropertyValuesHelper reads typed module-configuration values from a class
// config. Each accessor distinguishes "setting absent" (returns the
// notExistsValue) from "setting present but of an unusable type/format"
// (returns the defaultValue); the plain variants use the same value for both.
type PropertyValuesHelper interface {
	GetPropertyAsInt(cfg moduletools.ClassConfig, name string, defaultValue *int) *int
	GetPropertyAsIntWithNotExists(cfg moduletools.ClassConfig, name string, defaultValue, notExistsValue *int) *int
	GetPropertyAsInt64(cfg moduletools.ClassConfig, name string, defaultValue *int64) *int64
	GetPropertyAsInt64WithNotExists(cfg moduletools.ClassConfig, name string, defaultValue, notExistsValue *int64) *int64
	GetPropertyAsFloat64(cfg moduletools.ClassConfig, name string, defaultValue *float64) *float64
	GetPropertyAsFloat64WithNotExists(cfg moduletools.ClassConfig, name string, defaultValue, notExistsValue *float64) *float64
	GetPropertyAsString(cfg moduletools.ClassConfig, name, defaultValue string) string
	GetPropertyAsStringWithNotExists(cfg moduletools.ClassConfig, name, defaultValue, notExistsValue string) string
	GetPropertyAsBool(cfg moduletools.ClassConfig, name string, defaultValue bool) bool
	GetPropertyAsBoolWithNotExists(cfg moduletools.ClassConfig, name string, defaultValue, notExistsValue bool) bool
	GetNumber(in interface{}) (float32, error)
}
+
// classPropertyValuesHelper resolves settings by module name, optionally
// consulting alternative (e.g. legacy) module names before falling back to
// the default class settings.
type classPropertyValuesHelper struct {
	moduleName string
	altNames   []string
}
+
// NewPropertyValuesHelper creates a helper scoped to the given module name.
func NewPropertyValuesHelper(moduleName string) PropertyValuesHelper {
	return &classPropertyValuesHelper{moduleName: moduleName}
}
+
+func NewPropertyValuesHelperWithAltNames(moduleName string, altNames []string) PropertyValuesHelper {
+ return &classPropertyValuesHelper{moduleName, altNames}
+}
+
// GetPropertyAsInt returns the int value of the named setting, using
// defaultValue for both absent and malformed values.
func (h *classPropertyValuesHelper) GetPropertyAsInt(cfg moduletools.ClassConfig,
	name string, defaultValue *int,
) *int {
	return h.GetPropertyAsIntWithNotExists(cfg, name, defaultValue, defaultValue)
}

// GetPropertyAsIntWithNotExists returns the int value of the named setting;
// notExistsValue is returned when the config is nil or the key is absent,
// defaultValue when the stored value cannot be converted.
func (h *classPropertyValuesHelper) GetPropertyAsIntWithNotExists(cfg moduletools.ClassConfig,
	name string, defaultValue, notExistsValue *int,
) *int {
	if cfg == nil {
		// we would receive a nil-config on cross-class requests, such as Explore{}
		return notExistsValue
	}
	return getNumberValue(h.GetSettings(cfg), name, defaultValue, notExistsValue)
}

// GetPropertyAsInt64 returns the int64 value of the named setting, using
// defaultValue for both absent and malformed values.
func (h *classPropertyValuesHelper) GetPropertyAsInt64(cfg moduletools.ClassConfig,
	name string, defaultValue *int64,
) *int64 {
	return h.GetPropertyAsInt64WithNotExists(cfg, name, defaultValue, defaultValue)
}

// GetPropertyAsInt64WithNotExists is the int64 analogue of
// GetPropertyAsIntWithNotExists.
func (h *classPropertyValuesHelper) GetPropertyAsInt64WithNotExists(cfg moduletools.ClassConfig,
	name string, defaultValue, notExistsValue *int64,
) *int64 {
	if cfg == nil {
		// we would receive a nil-config on cross-class requests, such as Explore{}
		return notExistsValue
	}
	return getNumberValue(h.GetSettings(cfg), name, defaultValue, notExistsValue)
}

// GetPropertyAsFloat64 returns the float64 value of the named setting, using
// defaultValue for both absent and malformed values.
func (h *classPropertyValuesHelper) GetPropertyAsFloat64(cfg moduletools.ClassConfig,
	name string, defaultValue *float64,
) *float64 {
	return h.GetPropertyAsFloat64WithNotExists(cfg, name, defaultValue, defaultValue)
}

// GetPropertyAsFloat64WithNotExists is the float64 analogue of
// GetPropertyAsIntWithNotExists.
func (h *classPropertyValuesHelper) GetPropertyAsFloat64WithNotExists(cfg moduletools.ClassConfig,
	name string, defaultValue, notExistsValue *float64,
) *float64 {
	if cfg == nil {
		// we would receive a nil-config on cross-class requests, such as Explore{}
		return notExistsValue
	}
	return getNumberValue(h.GetSettings(cfg), name, defaultValue, notExistsValue)
}

// GetPropertyAsString returns the string value of the named setting, using
// defaultValue for both absent and non-string values.
func (h *classPropertyValuesHelper) GetPropertyAsString(cfg moduletools.ClassConfig,
	name, defaultValue string,
) string {
	return h.GetPropertyAsStringWithNotExists(cfg, name, defaultValue, defaultValue)
}
+
+func (h *classPropertyValuesHelper) GetPropertyAsStringWithNotExists(cfg moduletools.ClassConfig,
+ name, defaultValue, notExistsValue string,
+) string {
+ if cfg == nil {
+ // we would receive a nil-config on cross-class requests, such as Explore{}
+ return notExistsValue
+ }
+
+ value := h.GetSettings(cfg)[name]
+ switch v := value.(type) {
+ case nil:
+ return notExistsValue
+ case string:
+ return v
+ default:
+ return defaultValue
+ }
+}
+
// GetPropertyAsBool returns the bool value of the named setting, using
// defaultValue for both absent and malformed values.
func (h *classPropertyValuesHelper) GetPropertyAsBool(cfg moduletools.ClassConfig,
	name string, defaultValue bool,
) bool {
	return h.GetPropertyAsBoolWithNotExists(cfg, name, defaultValue, defaultValue)
}
+
+func (h *classPropertyValuesHelper) GetPropertyAsBoolWithNotExists(cfg moduletools.ClassConfig,
+ name string, defaultValue, notExistsValue bool,
+) bool {
+ if cfg == nil {
+ // we would receive a nil-config on cross-class requests, such as Explore{}
+ return notExistsValue
+ }
+
+ value := h.GetSettings(cfg)[name]
+ switch v := value.(type) {
+ case nil:
+ return notExistsValue
+ case bool:
+ return v
+ case string:
+ asBool, err := strconv.ParseBool(v)
+ if err == nil {
+ return asBool
+ }
+ return defaultValue
+ default:
+ return defaultValue
+ }
+}
+
+func (h *classPropertyValuesHelper) GetNumber(in interface{}) (float32, error) {
+ switch i := in.(type) {
+ case float64:
+ return float32(i), nil
+ case float32:
+ return i, nil
+ case int:
+ return float32(i), nil
+ case string:
+ num, err := strconv.ParseFloat(i, 64)
+ if err != nil {
+ return 0, err
+ }
+ return float32(num), err
+ case json.Number:
+ num, err := i.Float64()
+ if err != nil {
+ return 0, err
+ }
+ return float32(num), err
+ default:
+ return 0.0, fmt.Errorf("unrecognized type: %T", in)
+ }
+}
+
// GetSettings resolves the module's settings map from the class config:
// it tries the primary module name first, then each registered alternative
// name, and finally falls back to the default class settings.
func (h *classPropertyValuesHelper) GetSettings(cfg moduletools.ClassConfig) map[string]interface{} {
	if h.moduleName != "" {
		if settings := cfg.ClassByModuleName(h.moduleName); len(settings) > 0 {
			return settings
		}
		// Legacy / alternative module names may still carry the settings.
		for _, altName := range h.altNames {
			if settings := cfg.ClassByModuleName(altName); len(settings) > 0 {
				return settings
			}
		}
	}
	return cfg.Class()
}
+
// getNumberValue reads settings[name] and converts it to *T (int, int64, or
// float64):
//   - key absent / nil value -> notExistsValue
//   - unparseable / unsupported type -> defaultValue
//
// Fractional inputs are truncated when T is an integer type, matching the
// pre-existing float64 handling. Compared to the earlier version, fractional
// json.Number and string values (e.g. "1.5") now convert instead of being
// rejected; previously-successful inputs behave identically.
func getNumberValue[T int | int64 | float64](settings map[string]interface{},
	name string, defaultValue, notExistsValue *T,
) *T {
	switch v := settings[name].(type) {
	case nil:
		return notExistsValue
	case json.Number:
		// Prefer the lossless integer path, then fall back to float so
		// fractional JSON numbers are not silently dropped.
		if i, err := v.Int64(); err == nil {
			n := T(i)
			return &n
		}
		if f, err := v.Float64(); err == nil {
			n := T(f)
			return &n
		}
		return defaultValue
	case float32:
		n := T(v)
		return &n
	case float64:
		n := T(v)
		return &n
	case int:
		n := T(v)
		return &n
	case int16:
		n := T(v)
		return &n
	case int32:
		n := T(v)
		return &n
	case int64:
		n := T(v)
		return &n
	case string:
		// Accept integer strings first (lossless), then float strings.
		if i, err := strconv.Atoi(v); err == nil {
			n := T(i)
			return &n
		}
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			n := T(f)
			return &n
		}
		return defaultValue
	default:
		return defaultValue
	}
}
+
// asNumber converts a numeric value of type T into a freshly allocated value
// of the target numeric type R and returns a pointer to it (truncating when
// converting a float into an integer type).
func asNumber[T int | int16 | int32 | int64 | float32 | float64, R int | int64 | float64](v T) *R {
	converted := R(v)
	return &converted
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer.go
new file mode 100644
index 0000000000000000000000000000000000000000..48fd0a62d815bbfe67ddbf5641ca6898291b3807
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer.go
@@ -0,0 +1,100 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package text2vecbase
+
+import (
+ "context"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/weaviate/weaviate/entities/dto"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/batch"
+ objectsvectorizer "github.com/weaviate/weaviate/usecases/modulecomponents/vectorizer"
+ "github.com/weaviate/weaviate/usecases/monitoring"
+ libvectorizer "github.com/weaviate/weaviate/usecases/vectorizer"
+)
+
// New creates a BatchVectorizer wiring the given batch client, shared batch
// scheduler, and tokenizer function; it is the public entry point used by
// text2vec modules.
func New[T dto.Embedding](client BatchClient[T], batchVectorizer *batch.Batch[T], tokenizerFunc batch.TokenizerFuncType) *BatchVectorizer[T] {
	return newBatchVectorizer(client, batchVectorizer, tokenizerFunc)
}
+
+func newBatchVectorizer[T dto.Embedding](client BatchClient[T], batchVectorizer *batch.Batch[T], tokenizerFunc batch.TokenizerFuncType) *BatchVectorizer[T] {
+ vec := &BatchVectorizer[T]{
+ client: client,
+ objectVectorizer: objectsvectorizer.New(),
+ batchVectorizer: batchVectorizer,
+ tokenizerFunc: tokenizerFunc,
+ encoderCache: batch.NewEncoderCache(),
+ }
+
+ return vec
+}
+
// Object vectorizes a single object's text. The returned
// models.AdditionalProperties is always nil for this vectorizer.
func (v *BatchVectorizer[T]) Object(ctx context.Context, object *models.Object, cfg moduletools.ClassConfig, cs objectsvectorizer.ClassSettings,
) (T, models.AdditionalProperties, error) {
	vec, err := v.object(ctx, object, cfg, cs)
	return vec, nil, err
}
+
+func (v *BatchVectorizer[T]) object(ctx context.Context, object *models.Object, cfg moduletools.ClassConfig, cs objectsvectorizer.ClassSettings,
+) (T, error) {
+ text := v.objectVectorizer.Texts(ctx, object, cs)
+ res, _, _, err := v.client.Vectorize(ctx, []string{text}, cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(res.Vector) > 1 {
+ return libvectorizer.CombineVectors(res.Vector), nil
+ }
+ return res.Vector[0], nil
+}
+
+func (v *BatchVectorizer[T]) ObjectBatch(ctx context.Context, objects []*models.Object, skipObject []bool, cfg moduletools.ClassConfig,
+) ([]T, map[int]error) {
+ beforeTokenization := time.Now()
+ texts, tokenCounts, skipAll, err := v.tokenizerFunc(ctx, objects, skipObject, cfg, v.objectVectorizer, v.encoderCache)
+ if err != nil {
+ errs := make(map[int]error)
+ for j := range texts {
+ errs[j] = err
+ }
+ return nil, errs
+ }
+
+ monitoring.GetMetrics().T2VBatchQueueDuration.WithLabelValues(v.batchVectorizer.Label, "tokenization").
+ Observe(time.Since(beforeTokenization).Seconds())
+
+ if skipAll {
+ return make([]T, len(objects)), make(map[int]error)
+ }
+
+ monitoring.GetMetrics().ModuleExternalBatchLength.WithLabelValues("vectorizeBatch", objects[0].Class).Observe(float64(len(objects)))
+
+ return v.batchVectorizer.SubmitBatchAndWait(ctx, cfg, skipObject, tokenCounts, texts)
+}
+
+func (v *BatchVectorizer[T]) Texts(ctx context.Context, inputs []string,
+ cfg moduletools.ClassConfig,
+) (T, error) {
+ res, err := v.client.VectorizeQuery(ctx, inputs, cfg)
+ if err != nil {
+ return nil, errors.Wrap(err, "remote client vectorize")
+ }
+
+ if len(res.Vector) > 1 {
+ return libvectorizer.CombineVectors(res.Vector), nil
+ }
+ return res.Vector[0], nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..47bfee8903ee7aa68e71f4aa74d956e2c27e32f8
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/batch_vectorizer_test.go
@@ -0,0 +1,116 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package text2vecbase
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/schema"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/batch"
+ objectsvectorizer "github.com/weaviate/weaviate/usecases/modulecomponents/vectorizer"
+)
+
// fakeClassConfig is a minimal stub of moduletools.ClassConfig used by the
// tests and benchmarks in this package.
type fakeClassConfig struct {
	classConfig           map[string]interface{} // returned verbatim by ClassByModuleName
	vectorizeClassName    bool
	vectorizePropertyName bool
	skippedProperty       string // property reported as skipped (not indexed)
	excludedProperty      string // property reported as not indexed, name not vectorized
	// module specific settings
	cohereModel  string
	truncateType string
	baseURL      string
}
+
+func (f fakeClassConfig) Class() map[string]interface{} {
+ classSettings := map[string]interface{}{
+ "vectorizeClassName": f.vectorizeClassName,
+ "model": f.cohereModel,
+ "truncate": f.truncateType,
+ "baseURL": f.baseURL,
+ }
+ return classSettings
+}
+
+func (f fakeClassConfig) PropertyIndexed(property string) bool {
+ return !((property == f.skippedProperty) || (property == f.excludedProperty))
+}
+
// ClassByModuleName returns the preset classConfig regardless of the
// requested module name.
func (f fakeClassConfig) ClassByModuleName(moduleName string) map[string]interface{} {
	return f.classConfig
}
+
+func (f fakeClassConfig) Property(propName string) map[string]interface{} {
+ if propName == f.skippedProperty {
+ return map[string]interface{}{
+ "skip": true,
+ }
+ }
+ if propName == f.excludedProperty {
+ return map[string]interface{}{
+ "vectorizePropertyName": false,
+ }
+ }
+ if f.vectorizePropertyName {
+ return map[string]interface{}{
+ "vectorizePropertyName": true,
+ }
+ }
+ return nil
+}
+
// Tenant returns an empty tenant; multi-tenancy is not exercised here.
func (f fakeClassConfig) Tenant() string {
	return ""
}

// TargetVector returns an empty target vector name.
func (f fakeClassConfig) TargetVector() string {
	return ""
}

// VectorizeClassName reads the flag from classConfig.
// NOTE(review): the unchecked type assertion panics when the key is missing
// or not a bool — tests must always set "vectorizeClassName" in classConfig.
func (f fakeClassConfig) VectorizeClassName() bool {
	return f.classConfig["vectorizeClassName"].(bool)
}

// VectorizePropertyName returns the stub-wide flag for every property.
func (f fakeClassConfig) VectorizePropertyName(propertyName string) bool {
	return f.vectorizePropertyName
}

// Properties returns nil; source properties are not used by these tests.
func (f fakeClassConfig) Properties() []string {
	return nil
}

// PropertiesDataTypes returns nil; data types are not used by these tests.
func (f fakeClassConfig) PropertiesDataTypes() map[string]schema.DataType {
	return nil
}

// Config returns nil; the global server config is not used by these tests.
func (f fakeClassConfig) Config() *config.Config {
	return nil
}
+
// BenchmarkBatchVectorizer measures the tokenizer function's per-call
// overhead with an empty object list; the tokenizer is still expected to
// populate the encoder cache (asserted below).
// NOTE(review): objects is empty while skipObject has one entry — the
// tokenizer appears to tolerate this length mismatch; confirm intended.
func BenchmarkBatchVectorizer(b *testing.B) {
	tokenizer := batch.ReturnBatchTokenizer(1, "", false)
	ctx := context.Background()
	cfg := &fakeClassConfig{vectorizePropertyName: false, classConfig: map[string]interface{}{"vectorizeClassName": false}}

	vectorizer := objectsvectorizer.New()
	encoderCache := batch.NewEncoderCache()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _, err := tokenizer(ctx, []*models.Object{}, []bool{false}, cfg, vectorizer, encoderCache)
		require.NoError(b, err)
		require.Len(b, encoderCache, 1)
	}
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/interfaces.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/interfaces.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b11081f738db20088231797e5b45a775f22ce91
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/text2vecbase/interfaces.go
@@ -0,0 +1,56 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package text2vecbase
+
+import (
+ "context"
+
+ "github.com/weaviate/weaviate/entities/dto"
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+ "github.com/weaviate/weaviate/usecases/modulecomponents"
+ "github.com/weaviate/weaviate/usecases/modulecomponents/batch"
+ objectsvectorizer "github.com/weaviate/weaviate/usecases/modulecomponents/vectorizer"
+)
+
// TextVectorizer vectorizes single objects and free-text queries into an
// embedding of type T.
type TextVectorizer[T dto.Embedding] interface {
	Object(ctx context.Context, object *models.Object,
		cfg moduletools.ClassConfig) (T, models.AdditionalProperties, error)
	Texts(ctx context.Context, input []string,
		cfg moduletools.ClassConfig) (T, error)
}

// TextVectorizerBatch extends single-object vectorization with batch
// support; ObjectBatch returns one embedding per object plus per-object
// errors keyed by input index.
type TextVectorizerBatch[T dto.Embedding] interface {
	Texts(ctx context.Context, input []string,
		cfg moduletools.ClassConfig) (T, error)
	Object(ctx context.Context, object *models.Object,
		cfg moduletools.ClassConfig, cs objectsvectorizer.ClassSettings) (T, models.AdditionalProperties, error)
	ObjectBatch(ctx context.Context, objects []*models.Object, skipObject []bool, cfg moduletools.ClassConfig) ([]T, map[int]error)
}

// MetaProvider exposes module metadata.
type MetaProvider interface {
	MetaInfo() (map[string]interface{}, error)
}

// BatchVectorizer implements batched text vectorization on top of a
// BatchClient, delegating scheduling to the shared batch component and
// caching tokenizer encoders between calls.
type BatchVectorizer[T dto.Embedding] struct {
	client           BatchClient[T]
	objectVectorizer *objectsvectorizer.ObjectVectorizer
	batchVectorizer  *batch.Batch[T]
	tokenizerFunc    batch.TokenizerFuncType
	encoderCache     *batch.EncoderCache
}

// BatchClient is a batch-capable vectorization client that can also serve
// single query-vectorization requests.
type BatchClient[T dto.Embedding] interface {
	batch.BatchClient[T]
	VectorizeQuery(ctx context.Context, input []string,
		cfg moduletools.ClassConfig) (*modulecomponents.VectorizationResult[T], error)
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc428ebebd4eaf4b0588424c7b614d44f8078032
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module.go
@@ -0,0 +1,353 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ clusterusage "github.com/weaviate/weaviate/cluster/usage"
+ "github.com/weaviate/weaviate/cluster/usage/types"
+ enterrors "github.com/weaviate/weaviate/entities/errors"
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
+ "github.com/weaviate/weaviate/usecases/config"
+)
+
const (
	// DefaultCollectionInterval is how often usage data is collected and
	// uploaded when no override is configured.
	DefaultCollectionInterval = 1 * time.Hour
	// DefaultShardJitterInterval short for shard-level operations and can be configurable later on
	DefaultShardJitterInterval = 100 * time.Millisecond
	// DefaultRuntimeLoadInterval is how often runtime config overrides are re-read.
	DefaultRuntimeLoadInterval = 2 * time.Minute
	// DefaultPolicyVersion is the report schema version used when none is configured.
	DefaultPolicyVersion = "2025-06-01"
)
+
// BaseModule contains the common logic for usage collection modules
type BaseModule struct {
	nodeID        string               // cluster hostname; identifies this node in uploaded reports
	policyVersion string               // report schema version written into each report
	moduleName    string               // name reported by Name(); also used as the logger component
	config        *config.Config       // server config; Usage.* fields may be nil (see InitializeCommon)
	storage       StorageBackend       // backend the reports are uploaded to
	interval      time.Duration        // collection/upload period
	shardJitter   time.Duration        // jitter applied to shard-level operations by the usage service
	stopChan      chan struct{}        // closed by Close() to stop the periodic goroutine
	metrics       *Metrics             // Prometheus metrics for collection/upload
	usageService  clusterusage.Service // source of usage reports; may be nil until SetUsageService succeeds
	logger        logrus.FieldLogger
	// mu mutex to protect shared fields to run concurrently the collection and upload
	// to avoid interval overlap for the tickers
	mu sync.RWMutex
}
+
+// NewBaseModule creates a new base module instance
+func NewBaseModule(moduleName string, storage StorageBackend) *BaseModule {
+ return &BaseModule{
+ interval: DefaultCollectionInterval,
+ shardJitter: DefaultShardJitterInterval,
+ stopChan: make(chan struct{}),
+ storage: storage,
+ moduleName: moduleName,
+ }
+}
+
// SetUsageService wires the cluster usage service into the module and
// propagates the configured shard jitter to it.
// NOTE(review): a value that does not implement clusterusage.Service is
// silently ignored, leaving b.usageService nil (collectUsageData then
// errors) — confirm callers always pass the right type.
func (b *BaseModule) SetUsageService(usageService any) {
	if service, ok := usageService.(clusterusage.Service); ok {
		b.usageService = service
		service.SetJitterInterval(b.shardJitter)
	}
}
+
// Name returns the module's name.
func (b *BaseModule) Name() string {
	return b.moduleName
}

// Type reports this module as a usage module.
func (b *BaseModule) Type() modulecapabilities.ModuleType {
	return modulecapabilities.Usage
}
+
// InitializeCommon initializes the common components of the usage module:
// it validates the cluster hostname, resolves policy version, collection
// interval and shard jitter from the (possibly nil) Usage config values,
// optionally verifies storage permissions, and starts the periodic
// collect-and-upload goroutine. Returns an error if the hostname is unset
// or permission verification fails.
func (b *BaseModule) InitializeCommon(ctx context.Context, config *config.Config, logger logrus.FieldLogger, metrics *Metrics) error {
	b.config = config
	b.logger = logger.WithField("component", b.moduleName)
	b.metrics = metrics
	if b.config.Cluster.Hostname == "" {
		return fmt.Errorf("cluster hostname is not set")
	}

	b.nodeID = b.config.Cluster.Hostname

	// Initialize policy version
	if b.config.Usage.PolicyVersion != nil {
		b.policyVersion = b.config.Usage.PolicyVersion.Get()
	}
	if b.policyVersion == "" {
		b.policyVersion = DefaultPolicyVersion
	}

	// Non-positive configured intervals are ignored; the defaults from
	// NewBaseModule remain in effect.
	if b.config.Usage.ScrapeInterval != nil {
		if interval := b.config.Usage.ScrapeInterval.Get(); interval > 0 {
			b.interval = interval
		}
	}

	// Initialize shard jitter interval
	if b.config.Usage.ShardJitterInterval != nil {
		if jitterInterval := b.config.Usage.ShardJitterInterval.Get(); jitterInterval > 0 {
			b.shardJitter = jitterInterval
		}
	}

	// Verify storage permissions (opt-in)
	var shouldVerifyPermissions bool
	if b.config.Usage.VerifyPermissions != nil {
		shouldVerifyPermissions = b.config.Usage.VerifyPermissions.Get()
	}

	if shouldVerifyPermissions {
		if err := b.storage.VerifyPermissions(ctx); err != nil {
			return fmt.Errorf("failed to verify storage permissions: %w", err)
		}
		b.logger.Info("storage permissions verified successfully")
	} else {
		b.logger.Info("storage permission verification skipped (disabled by configuration)")
	}

	// Start periodic collection and upload. A fresh background context is
	// used so the goroutine is not bound to the (short-lived) init ctx;
	// it is stopped via stopChan in Close().
	enterrors.GoWrapper(func() {
		b.collectAndUploadPeriodically(context.Background())
	}, b.logger)

	b.logger.Infof("%s module initialized successfully", b.moduleName)
	return nil
}
+
// collectAndUploadPeriodically runs the module's main loop: a collection
// ticker triggers collect-and-upload cycles (each in its own goroutine so a
// slow upload does not block the loop), and a second ticker periodically
// re-applies runtime config overrides. The loop exits on ctx cancellation
// or when stopChan is closed by Close().
func (b *BaseModule) collectAndUploadPeriodically(ctx context.Context) {
	// Validate intervals before creating tickers
	if b.interval <= 0 {
		b.logger.Warn("Invalid collection interval (<= 0), using default of 1 hour")
		b.interval = DefaultCollectionInterval
	}

	loadInterval := b.config.RuntimeOverrides.LoadInterval
	if loadInterval <= 0 {
		b.logger.Warn("Invalid runtime overrides load interval (<= 0), using default of 2 minutes")
		loadInterval = DefaultRuntimeLoadInterval
	}

	b.logger.WithFields(logrus.Fields{
		"base_interval":        b.interval.String(),
		"load_interval":        loadInterval.String(),
		"shard_jitter":         b.shardJitter.String(),
		"default_shard_jitter": DefaultShardJitterInterval.String(),
	}).Debug("starting periodic collection with ticker")

	// Create ticker with base interval
	ticker := time.NewTicker(b.interval)
	defer ticker.Stop()

	loadTicker := time.NewTicker(loadInterval)
	defer loadTicker.Stop()

	b.logger.WithFields(logrus.Fields{
		"interval":       b.interval.String(),
		"ticker_created": time.Now(),
		"next_fire":      time.Now().Add(b.interval),
	}).Debug("ticker created successfully, entering main loop")

	for {
		select {
		case <-ticker.C:
			b.logger.WithFields(logrus.Fields{
				"current_time": time.Now(),
				"ticker_type":  "collection",
				"interval":     b.interval.String(),
			}).Debug("collection ticker fired - starting collection cycle")

			// Run the cycle asynchronously so a long collection/upload
			// cannot delay the next tick or the config reload below.
			enterrors.GoWrapper(func() {
				if err := b.collectAndUploadUsage(ctx); err != nil {
					b.logger.WithError(err).Error("Failed to collect and upload usage data")
					b.metrics.OperationTotal.WithLabelValues("collect_and_upload", "error").Inc()
				} else {
					b.metrics.OperationTotal.WithLabelValues("collect_and_upload", "success").Inc()
				}
			}, b.logger)

			// ticker is used to reset the interval
			b.reloadConfig(ticker)

		case <-loadTicker.C:
			b.logger.WithFields(logrus.Fields{
				"ticker_type": "runtime_overrides",
				"interval":    loadInterval.String(),
			}).Debug("runtime overrides reloaded")
			// ticker is used to reset the interval
			b.reloadConfig(ticker)

		case <-ctx.Done():
			b.logger.WithFields(logrus.Fields{"error": ctx.Err()}).Info("context cancelled - stopping periodic collection")
			return
		case <-b.stopChan:
			b.logger.Info("stop signal received - stopping periodic collection")
			return
		}
	}
}
+
// collectAndUploadUsage performs one collection cycle: it fetches the usage
// report, stamps it with the policy version and a minute-truncated UTC
// collection timestamp, and uploads it via the storage backend. The mutex
// is held for stamping and upload so the cycle cannot race with
// reloadConfig updating shared fields or the storage configuration.
func (b *BaseModule) collectAndUploadUsage(ctx context.Context) error {
	start := time.Now()
	now := start.UTC()
	// Truncate to the minute so report filenames are stable within a minute.
	collectionTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), 0, 0, time.UTC).Format("2006-01-02T15-04-05Z")

	// Collect usage data and update metrics with timing
	usage, err := b.collectUsageData(ctx)
	collectionDuration := time.Since(start)

	// Record collection latency in Prometheus histogram
	b.metrics.OperationLatency.WithLabelValues("collect").Observe(collectionDuration.Seconds())

	b.logger.WithFields(logrus.Fields{
		"collection_duration_s": collectionDuration.Seconds(),
	}).Debug("usage data collection completed")

	if err != nil {
		return err
	}

	// Set version on usage data
	// Lock to protect shared fields and upload operation
	b.mu.Lock()
	defer b.mu.Unlock()
	usage.Version = b.policyVersion
	usage.CollectingTime = collectionTime

	return b.storage.UploadUsageData(ctx, usage)
}
+
+func (b *BaseModule) collectUsageData(ctx context.Context) (*types.Report, error) {
+ if b.usageService == nil {
+ return nil, fmt.Errorf("usage service not initialized")
+ }
+
+ usage, err := b.usageService.Usage(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get usage data: %w", err)
+ }
+
+ // Compute total collections and update gauge.
+ totalCollections := float64(len(usage.Collections))
+ b.metrics.ResourceCount.WithLabelValues("collections").Set(totalCollections)
+
+ // Compute total shards and update gauge.
+ var totalShards float64
+ for _, coll := range usage.Collections {
+ totalShards += float64(coll.UniqueShardCount)
+ }
+ b.metrics.ResourceCount.WithLabelValues("shards").Set(totalShards)
+
+ // Compute total backups and update gauge.
+ totalBackups := float64(len(usage.Backups))
+ b.metrics.ResourceCount.WithLabelValues("backups").Set(totalBackups)
+
+ return usage, nil
+}
+
+func (b *BaseModule) reloadConfig(ticker *time.Ticker) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Check for interval updates
+ if interval := b.config.Usage.ScrapeInterval.Get(); interval > 0 && b.interval != interval {
+ b.logger.WithFields(logrus.Fields{
+ "old_interval": b.interval.String(),
+ "new_interval": interval.String(),
+ }).Info("collection interval updated")
+ b.interval = interval
+ // Reset ticker with new interval
+ ticker.Reset(b.interval)
+ } else if interval <= 0 && b.interval <= 0 {
+ // If both old and new intervals are invalid, set a default
+ b.logger.Warn("Invalid interval detected during reload, using default of 1 hour")
+ b.interval = DefaultCollectionInterval
+ ticker.Reset(b.interval)
+ }
+
+ // Check for shard jitter interval updates
+ // Note: we allow 0 as a valid value for the shard jitter interval
+ if jitterInterval := b.config.Usage.ShardJitterInterval.Get(); jitterInterval >= 0 && b.shardJitter != jitterInterval {
+ b.logger.WithFields(logrus.Fields{
+ "old_jitter": b.shardJitter.String(),
+ "new_jitter": jitterInterval.String(),
+ }).Info("shard jitter interval updated")
+ b.shardJitter = jitterInterval
+ b.usageService.SetJitterInterval(b.shardJitter)
+ }
+
+ // Build common storage config
+ storageConfig := b.buildStorageConfig()
+
+ // Update storage backend configuration
+ if changed, err := b.storage.UpdateConfig(storageConfig); err != nil {
+ b.logger.WithError(err).Error("Failed to update storage configuration")
+ } else if changed {
+ b.logger.Info("storage configuration updated")
+ }
+}
+
// buildStorageConfig assembles the storage configuration from the module's
// identity and the (possibly nil) Usage config values. Note the assignment
// order: GCS bucket/prefix are applied after the S3 ones, so when both are
// configured the GCS values win.
func (b *BaseModule) buildStorageConfig() StorageConfig {
	config := StorageConfig{
		NodeID:            b.nodeID,
		Version:           b.policyVersion,
		VerifyPermissions: false,
	}

	// Set verification setting from configuration
	if b.config.Usage.VerifyPermissions != nil {
		config.VerifyPermissions = b.config.Usage.VerifyPermissions.Get()
	}

	if b.config.Usage.S3Bucket != nil {
		config.Bucket = b.config.Usage.S3Bucket.Get()
	}
	if b.config.Usage.S3Prefix != nil {
		config.Prefix = b.config.Usage.S3Prefix.Get()
	}

	// GCS settings override S3 settings when both are present.
	if b.config.Usage.GCSBucket != nil {
		config.Bucket = b.config.Usage.GCSBucket.Get()
	}

	if b.config.Usage.GCSPrefix != nil {
		config.Prefix = b.config.Usage.GCSPrefix.Get()
	}

	return config
}
+
// Close stops the periodic collection goroutine and closes the storage
// backend.
// NOTE(review): calling Close more than once panics on the second
// close(b.stopChan) — confirm callers invoke it exactly once.
func (b *BaseModule) Close() error {
	close(b.stopChan)
	if b.storage != nil {
		return b.storage.Close()
	}
	return nil
}
+
// Logger returns the module-scoped logger (set in InitializeCommon).
func (b *BaseModule) Logger() logrus.FieldLogger {
	return b.logger
}

// GetMetrics returns the module's Prometheus metrics (set in InitializeCommon).
func (b *BaseModule) GetMetrics() *Metrics {
	return b.metrics
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b41e50b0c599ad6fdcc6f14b178b43681d7135d
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_module_test.go
@@ -0,0 +1,41 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
// TestBaseModule_ShardJitterConfiguration verifies the default shard jitter
// value and that the shardJitter field accepts custom, zero, and negative
// assignments (the module does not clamp the field itself).
func TestBaseModule_ShardJitterConfiguration(t *testing.T) {
	// Test 1: Check that the default value is correct
	assert.Equal(t, 100*time.Millisecond, DefaultShardJitterInterval, "jitter interval should be 100ms")

	// Test 2: Test that the module initializes with default jitter
	module := NewBaseModule("test-module", nil)
	assert.Equal(t, DefaultShardJitterInterval, module.shardJitter, "module should use default jitter")

	// Test 3: Test configurable jitter
	customJitter := 50 * time.Millisecond
	module.shardJitter = customJitter
	assert.Equal(t, customJitter, module.shardJitter, "module should use custom jitter")

	// Test 4: Test that zero jitter is handled gracefully
	module.shardJitter = 0
	assert.Equal(t, 0*time.Millisecond, module.shardJitter, "module should allow zero jitter")

	// Test 5: Test that negative jitter is handled gracefully
	module.shardJitter = -1 * time.Millisecond
	assert.Equal(t, -1*time.Millisecond, module.shardJitter, "module should allow negative jitter")
}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_storage.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6505e8b40e1a97d52cecf6fd3f3d7ed5993f90b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/base_storage.go
@@ -0,0 +1,196 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/weaviate/weaviate/cluster/usage/types"
+ entcfg "github.com/weaviate/weaviate/entities/config"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/config/runtime"
+)
+
+// BaseStorage provides common functionality for all storage backends:
+// object-key construction, JSON marshalling of usage reports, common
+// config updates, verification logging, and upload metrics.
+type BaseStorage struct {
+	BucketName        string             // target bucket for usage reports
+	Prefix            string             // optional object-key prefix; may be empty
+	NodeID            string             // node identifier, used as a key path segment
+	VerifyPermissions bool               // whether permission verification is requested
+	Logger            logrus.FieldLogger // structured logger for backend events
+	Metrics           *Metrics           // optional metrics sink; may be nil
+}
+
+// NewBaseStorage creates a new base storage instance wired to the given
+// logger and metrics; the remaining fields are populated later, e.g. via
+// UpdateCommonConfig.
+func NewBaseStorage(logger logrus.FieldLogger, metrics *Metrics) *BaseStorage {
+	b := &BaseStorage{}
+	b.Logger = logger
+	b.Metrics = metrics
+	return b
+}
+
+// ConstructObjectKey creates the full object key path for storage.
+// The key is "<prefix>/<node>/<time>.json", or "<node>/<time>.json"
+// when no prefix is configured.
+func (b *BaseStorage) ConstructObjectKey(collectionTime string) string {
+	filename := collectionTime + ".json"
+	if b.Prefix == "" {
+		return b.NodeID + "/" + filename
+	}
+	return b.Prefix + "/" + b.NodeID + "/" + filename
+}
+
+// MarshalUsageData converts usage data to JSON, wrapping any encoding
+// failure with context.
+func (b *BaseStorage) MarshalUsageData(usage *types.Report) ([]byte, error) {
+	payload, err := json.Marshal(usage)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal usage data: %w", err)
+	}
+	return payload, nil
+}
+
+// UpdateCommonConfig updates common configuration fields and returns whether
+// anything changed. Bucket and NodeID only change when the incoming value is
+// non-empty; Prefix and VerifyPermissions track the incoming value verbatim.
+// Every applied change is logged.
+func (b *BaseStorage) UpdateCommonConfig(cfg StorageConfig) bool {
+	dirty := false
+
+	// Bucket: only a non-empty, different value is applied. Warn because a
+	// bucket switch may invalidate existing credentials.
+	if cfg.Bucket != "" && cfg.Bucket != b.BucketName {
+		b.Logger.WithFields(logrus.Fields{
+			"old_bucket": b.BucketName,
+			"new_bucket": cfg.Bucket,
+		}).Warn("bucket name changed - this may require re-authentication")
+		b.BucketName = cfg.Bucket
+		dirty = true
+	}
+
+	// Prefix: any difference (including clearing to empty) is applied.
+	if cfg.Prefix != b.Prefix {
+		b.Logger.WithFields(logrus.Fields{
+			"old_prefix": b.Prefix,
+			"new_prefix": cfg.Prefix,
+		}).Info("upload prefix updated")
+		b.Prefix = cfg.Prefix
+		dirty = true
+	}
+
+	// VerifyPermissions: track the incoming flag.
+	if cfg.VerifyPermissions != b.VerifyPermissions {
+		b.Logger.WithFields(logrus.Fields{
+			"old_verify_permissions": b.VerifyPermissions,
+			"new_verify_permissions": cfg.VerifyPermissions,
+		}).Info("verify permissions updated")
+		b.VerifyPermissions = cfg.VerifyPermissions
+		dirty = true
+	}
+
+	// NodeID: like Bucket, only a non-empty, different value is applied.
+	if cfg.NodeID != "" && cfg.NodeID != b.NodeID {
+		b.Logger.WithFields(logrus.Fields{
+			"old_node_id": b.NodeID,
+			"new_node_id": cfg.NodeID,
+		}).Info("node ID updated")
+		b.NodeID = cfg.NodeID
+		dirty = true
+	}
+
+	return dirty
+}
+
+// IsLocalhostEnvironment checks if running in localhost/emulator mode,
+// signalled by CLUSTER_IN_LOCALHOST being set to any non-empty value.
+func (b *BaseStorage) IsLocalhostEnvironment() bool {
+	if v := os.Getenv("CLUSTER_IN_LOCALHOST"); v != "" {
+		return true
+	}
+	return false
+}
+
+// LogVerificationStart logs the start of permission verification.
+// The entry carries only structured fields; the message itself is empty.
+func (b *BaseStorage) LogVerificationStart() {
+	fields := logrus.Fields{
+		"action": "verify_bucket_permissions",
+		"bucket": b.BucketName,
+		"prefix": b.Prefix,
+	}
+	b.Logger.WithFields(fields).Info("")
+}
+
+// LogVerificationSuccess logs successful permission verification. Callers
+// may pass additional field sets, which are merged into the standard
+// bucket/prefix fields; later sets win on key collisions.
+func (b *BaseStorage) LogVerificationSuccess(extraFields ...logrus.Fields) {
+	merged := logrus.Fields{}
+	merged["bucket"] = b.BucketName
+	merged["prefix"] = b.Prefix
+	for _, set := range extraFields {
+		for key, val := range set {
+			merged[key] = val
+		}
+	}
+	b.Logger.WithFields(merged).Info("permissions verified successfully")
+}
+
+// RecordUploadMetrics records the size of the last uploaded payload.
+// It is a no-op when metrics (or the specific gauge) are not configured.
+func (b *BaseStorage) RecordUploadMetrics(dataSize int) {
+	if b.Metrics == nil || b.Metrics.UploadedFileSize == nil {
+		return
+	}
+	b.Metrics.UploadedFileSize.Set(float64(dataSize))
+}
+
+// durationFromEnv returns the value of the environment variable key parsed
+// as a time.Duration, or fallback when the variable is unset. A set but
+// unparsable value yields an error naming the offending variable.
+func durationFromEnv(key string, fallback time.Duration) (time.Duration, error) {
+	v := os.Getenv(key)
+	if v == "" {
+		return fallback, nil
+	}
+	d, err := time.ParseDuration(v)
+	if err != nil {
+		return 0, fmt.Errorf("invalid %s: %w", key, err)
+	}
+	return d, nil
+}
+
+// ParseCommonUsageConfig parses common environment variables shared by all
+// usage modules and materializes the results as dynamic values on cfg.Usage.
+// Precedence for every setting: environment variable, then any value already
+// present in cfg, then the package default.
+//
+// The parameter is named cfg (not config) so it does not shadow the imported
+// config package.
+func ParseCommonUsageConfig(cfg *config.Config) error {
+	// Collection ("scrape") interval.
+	scrapeFallback := DefaultCollectionInterval
+	if cfg.Usage.ScrapeInterval != nil {
+		scrapeFallback = cfg.Usage.ScrapeInterval.Get()
+	}
+	scrapeInterval, err := durationFromEnv("USAGE_SCRAPE_INTERVAL", scrapeFallback)
+	if err != nil {
+		return err
+	}
+	cfg.Usage.ScrapeInterval = runtime.NewDynamicValue(scrapeInterval)
+
+	// Usage policy version (opaque string; no validation performed).
+	policyVersion := DefaultPolicyVersion
+	if v := os.Getenv("USAGE_POLICY_VERSION"); v != "" {
+		policyVersion = v
+	} else if cfg.Usage.PolicyVersion != nil {
+		policyVersion = cfg.Usage.PolicyVersion.Get()
+	}
+	cfg.Usage.PolicyVersion = runtime.NewDynamicValue(policyVersion)
+
+	// Per-shard jitter interval.
+	jitterFallback := DefaultShardJitterInterval
+	if cfg.Usage.ShardJitterInterval != nil {
+		jitterFallback = cfg.Usage.ShardJitterInterval.Get()
+	}
+	shardJitterInterval, err := durationFromEnv("USAGE_SHARD_JITTER_INTERVAL", jitterFallback)
+	if err != nil {
+		return err
+	}
+	cfg.Usage.ShardJitterInterval = runtime.NewDynamicValue(shardJitterInterval)
+
+	// Whether backends should verify bucket permissions before use.
+	verifyPermissions := false
+	if v := os.Getenv("USAGE_VERIFY_PERMISSIONS"); v != "" {
+		verifyPermissions = entcfg.Enabled(v)
+	} else if cfg.Usage.VerifyPermissions != nil {
+		verifyPermissions = cfg.Usage.VerifyPermissions.Get()
+	}
+	cfg.Usage.VerifyPermissions = runtime.NewDynamicValue(verifyPermissions)
+
+	return nil
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/common_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/common_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b4e3921c7fa4327b6f00f8b65ea57fbf9a0576e2
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/common_test.go
@@ -0,0 +1,368 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ clusterusage "github.com/weaviate/weaviate/cluster/usage"
+ "github.com/weaviate/weaviate/cluster/usage/types"
+ "github.com/weaviate/weaviate/usecases/config"
+ "github.com/weaviate/weaviate/usecases/config/runtime"
+ usagetypes "github.com/weaviate/weaviate/usecases/modulecomponents/usage/types"
+)
+
+// TestUsageResponse_Marshaling tests the JSON marshaling of usage response
+// by round-tripping a populated report and comparing the surviving fields.
+func TestUsageResponse_Marshaling(t *testing.T) {
+	report := &types.Report{
+		Node:    "test-node",
+		Version: "2025-06-01",
+		Collections: []*types.CollectionUsage{
+			{
+				Name:             "test-collection",
+				UniqueShardCount: 1,
+			},
+		},
+		Backups: []*types.BackupUsage{
+			{
+				ID:             "test-backup",
+				CompletionTime: "2024-01-01T00:00:00Z",
+				SizeInGib:      1.5,
+				Type:           "full",
+				Collections:    []string{"test-collection"},
+			},
+		},
+	}
+
+	encoded, err := json.MarshalIndent(report, "", " ")
+	require.NoError(t, err)
+
+	var decoded types.Report
+	require.NoError(t, json.Unmarshal(encoded, &decoded))
+
+	assert.Equal(t, report.Node, decoded.Node)
+	assert.Equal(t, report.Version, decoded.Version)
+	assert.Equal(t, len(report.Collections), len(decoded.Collections))
+	if len(report.Collections) > 0 && len(decoded.Collections) > 0 {
+		assert.Equal(t, report.Collections[0].Name, decoded.Collections[0].Name)
+		assert.Equal(t, report.Collections[0].UniqueShardCount, decoded.Collections[0].UniqueShardCount)
+	}
+	assert.Equal(t, len(report.Backups), len(decoded.Backups))
+	if len(report.Backups) > 0 && len(decoded.Backups) > 0 {
+		assert.Equal(t, report.Backups[0].ID, decoded.Backups[0].ID)
+		assert.Equal(t, report.Backups[0].Type, decoded.Backups[0].Type)
+		assert.Equal(t, report.Backups[0].SizeInGib, decoded.Backups[0].SizeInGib)
+	}
+}
+
+// TestMetrics_Initialization tests that the constructor returns a non-nil
+// Metrics value with every collector field populated.
+func TestMetrics_Initialization(t *testing.T) {
+	m := NewMetrics(prometheus.NewRegistry(), "test-module")
+
+	assert.NotNil(t, m)
+	assert.NotNil(t, m.OperationTotal)
+	assert.NotNil(t, m.OperationLatency)
+	assert.NotNil(t, m.ResourceCount)
+	assert.NotNil(t, m.UploadedFileSize)
+}
+
+// TestStorageConfig_Validation exercises the implicit validity rule of
+// StorageConfig: a config is considered valid when both Bucket and NodeID
+// are non-empty.
+func TestStorageConfig_Validation(t *testing.T) {
+	cases := []struct {
+		name   string
+		config StorageConfig
+		valid  bool
+	}{
+		{
+			name: "valid config",
+			config: StorageConfig{
+				Bucket:  "test-bucket",
+				Prefix:  "test-prefix",
+				NodeID:  "test-node",
+				Version: "2025-06-01",
+			},
+			valid: true,
+		},
+		{
+			name: "empty bucket",
+			config: StorageConfig{
+				Bucket:  "",
+				Prefix:  "test-prefix",
+				NodeID:  "test-node",
+				Version: "2025-06-01",
+			},
+			valid: false,
+		},
+		{
+			name: "empty nodeID",
+			config: StorageConfig{
+				Bucket:  "test-bucket",
+				Prefix:  "test-prefix",
+				NodeID:  "",
+				Version: "2025-06-01",
+			},
+			valid: false,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if !tc.valid {
+				// Invalid cases must be missing at least one required field.
+				assert.True(t, tc.config.Bucket == "" || tc.config.NodeID == "")
+				return
+			}
+			assert.NotEmpty(t, tc.config.Bucket)
+			assert.NotEmpty(t, tc.config.NodeID)
+		})
+	}
+}
+
+// TestModule_CollectAndUploadPeriodically_ContextCancellation tests context cancellation behavior:
+// the collection loop must return promptly once its context is cancelled.
+func TestModule_CollectAndUploadPeriodically_ContextCancellation(t *testing.T) {
+	logger := logrus.New()
+
+	// Create mock storage and usage service
+	mockStorage := NewMockStorageBackend(t)
+	mockUsageService := clusterusage.NewMockService(t)
+
+	// Set up expectations. Maybe() because the 100ms ticker may or may not
+	// fire before the context is cancelled at ~50ms.
+	mockUsageService.EXPECT().Usage(mock.Anything).Return(&types.Report{Node: "test-node"}, nil).Maybe()
+	mockStorage.EXPECT().UploadUsageData(mock.Anything, mock.Anything).Return(nil).Maybe()
+
+	// Create base module
+	baseModule := NewBaseModule("test-module", mockStorage)
+	baseModule.interval = 100 * time.Millisecond
+	baseModule.usageService = mockUsageService
+	baseModule.nodeID = "test-node"
+	baseModule.policyVersion = "2025-06-01"
+	baseModule.metrics = NewMetrics(prometheus.NewRegistry(), "test-module")
+	baseModule.logger = logger.WithField("component", "test-module")
+
+	// Set up config with proper RuntimeOverrides to prevent panic
+	baseModule.config = &config.Config{
+		RuntimeOverrides: config.RuntimeOverrides{
+			LoadInterval: 2 * time.Minute,
+		},
+	}
+
+	// Create context with cancellation
+	ctx, cancel := context.WithCancel(context.Background())
+
+	// Start the periodic collection in a goroutine
+	done := make(chan struct{})
+	go func() {
+		baseModule.collectAndUploadPeriodically(ctx)
+		close(done)
+	}()
+
+	// Cancel context after a short delay
+	time.Sleep(50 * time.Millisecond)
+	cancel()
+
+	// Wait for goroutine to finish; not exiting within a second means the
+	// loop ignored ctx.Done().
+	select {
+	case <-done:
+		// Success - goroutine exited
+	case <-time.After(1 * time.Second):
+		t.Fatal("Goroutine did not exit within timeout")
+	}
+
+	mockStorage.AssertExpectations(t)
+}
+
+// TestModule_CollectAndUploadPeriodically_StopSignal tests stop signal behavior:
+// closing the module's stop channel must terminate the loop even with a
+// never-cancelled context.
+func TestModule_CollectAndUploadPeriodically_StopSignal(t *testing.T) {
+	logger := logrus.New()
+
+	// Create mock storage and usage service
+	mockStorage := NewMockStorageBackend(t)
+	mockUsageService := clusterusage.NewMockService(t)
+
+	// Set up expectations. Maybe() because the ticker may or may not fire
+	// before the stop channel is closed.
+	mockUsageService.EXPECT().Usage(mock.Anything).Return(&types.Report{Node: "test-node"}, nil).Maybe()
+	mockStorage.EXPECT().UploadUsageData(mock.Anything, mock.Anything).Return(nil).Maybe()
+
+	// Create base module
+	baseModule := NewBaseModule("test-module", mockStorage)
+	baseModule.interval = 100 * time.Millisecond
+	baseModule.usageService = mockUsageService
+	baseModule.nodeID = "test-node"
+	baseModule.policyVersion = "2025-06-01"
+	baseModule.metrics = NewMetrics(prometheus.NewRegistry(), "test-module")
+	baseModule.logger = logger.WithField("component", "test-module")
+
+	// Set up config with proper RuntimeOverrides to prevent panic
+	baseModule.config = &config.Config{
+		RuntimeOverrides: config.RuntimeOverrides{
+			LoadInterval: 2 * time.Minute,
+		},
+	}
+
+	// Start the periodic collection in a goroutine
+	done := make(chan struct{})
+	go func() {
+		baseModule.collectAndUploadPeriodically(context.Background())
+		close(done)
+	}()
+
+	// Send stop signal after a short delay
+	time.Sleep(50 * time.Millisecond)
+	close(baseModule.stopChan)
+
+	// Wait for goroutine to finish
+	select {
+	case <-done:
+		// Success - goroutine exited
+	case <-time.After(1 * time.Second):
+		t.Fatal("Goroutine did not exit within timeout")
+	}
+
+	mockStorage.AssertExpectations(t)
+}
+
+// TestCollectAndUploadPeriodically_ConfigChangesAndStop tests config changes and stop behavior:
+// a runtime change to the dynamic scrape interval must be picked up by the
+// running loop (verified via baseModule.interval) before the stop channel
+// terminates it.
+func TestCollectAndUploadPeriodically_ConfigChangesAndStop(t *testing.T) {
+	logger := logrus.New()
+
+	// Create mock storage and usage service
+	mockStorage := NewMockStorageBackend(t)
+	mockUsageService := clusterusage.NewMockService(t)
+
+	// Set up expectations. All Maybe(): the fast ticker makes call counts
+	// nondeterministic.
+	mockUsageService.EXPECT().Usage(mock.Anything).Return(&types.Report{Node: "test-node"}, nil).Maybe()
+	mockUsageService.EXPECT().SetJitterInterval(mock.Anything).Return().Maybe()
+	mockStorage.EXPECT().UploadUsageData(mock.Anything, mock.Anything).Return(nil).Maybe()
+	mockStorage.EXPECT().UpdateConfig(mock.Anything).Return(true, nil).Maybe()
+
+	// Create base module
+	baseModule := NewBaseModule("test-module", mockStorage)
+	baseModule.interval = 10 * time.Millisecond // Fast ticker for test
+	baseModule.usageService = mockUsageService
+	baseModule.nodeID = "test-node"
+	baseModule.policyVersion = "2025-06-01"
+	baseModule.metrics = NewMetrics(prometheus.NewRegistry(), "test-module")
+	baseModule.logger = logger.WithField("component", "test-module")
+
+	// Use a dynamic config that we can mutate
+	testConfig := config.Config{
+		Usage: usagetypes.UsageConfig{
+			ScrapeInterval: runtime.NewDynamicValue(10 * time.Millisecond),
+		},
+		RuntimeOverrides: config.RuntimeOverrides{
+			LoadInterval: 2 * time.Minute,
+		},
+	}
+	baseModule.config = &testConfig
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Run the loop in a goroutine
+	done := make(chan struct{})
+	go func() {
+		baseModule.collectAndUploadPeriodically(ctx)
+		close(done)
+	}()
+
+	// Let it run for a few cycles
+	time.Sleep(30 * time.Millisecond)
+
+	// Change config values
+	baseModule.config.Usage.ScrapeInterval.SetValue(20 * time.Millisecond)
+
+	// Let it run for a few more cycles so the loop observes the new value
+	time.Sleep(50 * time.Millisecond)
+
+	// Stop the loop
+	close(baseModule.stopChan)
+
+	// Wait for goroutine to exit
+	select {
+	case <-done:
+	case <-time.After(1 * time.Second):
+		t.Fatal("collectAndUploadPeriodically did not exit in time")
+	}
+
+	// Assert the interval was updated
+	assert.Equal(t, 20*time.Millisecond, baseModule.interval)
+
+	mockStorage.AssertExpectations(t)
+}
+
+// TestModule_ZeroIntervalProtection tests that the module handles zero
+// intervals gracefully: starting the loop with interval == 0 (and a zero
+// RuntimeOverrides.LoadInterval) must neither panic nor hang, and the loop
+// is expected to replace the interval with a positive value.
+func TestModule_ZeroIntervalProtection(t *testing.T) {
+	logger := logrus.New()
+
+	// Create mock storage and usage service
+	mockStorage := NewMockStorageBackend(t)
+	mockUsageService := clusterusage.NewMockService(t)
+
+	// Set up expectations (Maybe(): call counts depend on timing)
+	mockUsageService.EXPECT().Usage(mock.Anything).Return(&types.Report{Node: "test-node"}, nil).Maybe()
+	mockStorage.EXPECT().UploadUsageData(mock.Anything, mock.Anything).Return(nil).Maybe()
+	mockStorage.EXPECT().UpdateConfig(mock.Anything).Return(false, nil).Maybe()
+
+	// Create base module
+	baseModule := NewBaseModule("test-module", mockStorage)
+	baseModule.interval = 0 // Set invalid interval
+	baseModule.usageService = mockUsageService
+	baseModule.nodeID = "test-node"
+	baseModule.policyVersion = "2025-06-01"
+	baseModule.metrics = NewMetrics(prometheus.NewRegistry(), "test-module")
+	baseModule.logger = logger.WithField("component", "test-module")
+
+	// Set up config with zero runtime overrides interval
+	baseModule.config = &config.Config{
+		RuntimeOverrides: config.RuntimeOverrides{
+			LoadInterval: 0, // Invalid interval
+		},
+	}
+
+	// This should not panic and should use default values
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Start the periodic collection in a goroutine
+	done := make(chan struct{})
+	go func() {
+		baseModule.collectAndUploadPeriodically(ctx)
+		close(done)
+	}()
+
+	// Cancel context after a short delay
+	time.Sleep(50 * time.Millisecond)
+	cancel()
+
+	// Wait for goroutine to finish
+	select {
+	case <-done:
+		// Success - goroutine exited without panic
+	case <-time.After(1 * time.Second):
+		t.Fatal("Goroutine did not exit within timeout")
+	}
+
+	// Verify that the interval was set to a valid value
+	assert.Greater(t, baseModule.interval, time.Duration(0))
+
+	mockStorage.AssertExpectations(t)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/metrics.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/metrics.go
new file mode 100644
index 0000000000000000000000000000000000000000..72bac231a8e578c67aa69dac97ed2acb0cb2e812
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/metrics.go
@@ -0,0 +1,64 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+// Metrics bundles the Prometheus collectors a usage module reports to.
+type Metrics struct {
+	// Operation metrics
+	OperationTotal   *prometheus.CounterVec   // labelled by operation and status
+	OperationLatency *prometheus.HistogramVec // labelled by operation
+
+	// Resource metrics
+	ResourceCount    *prometheus.GaugeVec // labelled by resource_type
+	UploadedFileSize prometheus.Gauge     // bytes of the last uploaded usage file
+}
+
+// NewMetrics builds the usage-module metric set and registers every
+// collector with reg. The module name is normalized into a metric prefix:
+// lowercased, dashes replaced with underscores, and "weaviate_" prepended.
+func NewMetrics(reg prometheus.Registerer, moduleName string) *Metrics {
+	moduleName = fmt.Sprintf("weaviate_%s", strings.ReplaceAll(strings.ToLower(moduleName), "-", "_"))
+	return &Metrics{
+		OperationTotal: promauto.With(reg).NewCounterVec(
+			prometheus.CounterOpts{
+				Name: moduleName + "_operations_total",
+				Help: "Total number of " + moduleName + " operations",
+			},
+			[]string{"operation", "status"}, // operation: collect/upload, status: success/error
+		),
+		// Register via promauto like the other collectors; a plain
+		// prometheus.NewHistogramVec would never be attached to reg and the
+		// latency histogram would be invisible on the metrics endpoint.
+		OperationLatency: promauto.With(reg).NewHistogramVec(
+			prometheus.HistogramOpts{
+				Name:    moduleName + "_operation_latency_seconds",
+				Help:    "Latency of usage operations in seconds",
+				Buckets: prometheus.DefBuckets,
+			},
+			[]string{"operation"}, // collect/upload
+		),
+		ResourceCount: promauto.With(reg).NewGaugeVec(
+			prometheus.GaugeOpts{
+				Name: moduleName + "_resource_count",
+				Help: "Number of resources tracked by " + moduleName,
+			},
+			[]string{"resource_type"}, // type: collections/shards/backups
+		),
+		UploadedFileSize: promauto.With(reg).NewGauge(
+			prometheus.GaugeOpts{
+				Name: moduleName + "_uploaded_file_size_bytes",
+				Help: "Size of the last uploaded usage file in bytes",
+			},
+		),
+	}
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/mock_storage_backend.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/mock_storage_backend.go
new file mode 100644
index 0000000000000000000000000000000000000000..df5b373b513e98724160c97070eb3061ba23dacf
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/mock_storage_backend.go
@@ -0,0 +1,242 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+// Code generated by mockery v2.53.2. DO NOT EDIT.
+
+package usage
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+ types "github.com/weaviate/weaviate/cluster/usage/types"
+)
+
+// MockStorageBackend is an autogenerated mock type for the StorageBackend type
+type MockStorageBackend struct {
+	mock.Mock
+}
+
+// MockStorageBackend_Expecter provides the typed EXPECT() API for
+// registering expectations on the mock.
+type MockStorageBackend_Expecter struct {
+	mock *mock.Mock
+}
+
+// EXPECT returns an expecter bound to this mock's underlying mock.Mock.
+func (_m *MockStorageBackend) EXPECT() *MockStorageBackend_Expecter {
+	return &MockStorageBackend_Expecter{mock: &_m.Mock}
+}
+
+// Close provides a mock function with no fields
+func (_m *MockStorageBackend) Close() error {
+	ret := _m.Called()
+
+	// A call without a configured return value is a test-setup bug.
+	if len(ret) == 0 {
+		panic("no return value specified for Close")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// MockStorageBackend_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
+type MockStorageBackend_Close_Call struct {
+	*mock.Call
+}
+
+// Close is a helper method to define mock.On call
+func (_e *MockStorageBackend_Expecter) Close() *MockStorageBackend_Close_Call {
+	return &MockStorageBackend_Close_Call{Call: _e.mock.On("Close")}
+}
+
+// Run registers a callback invoked whenever Close is called.
+func (_c *MockStorageBackend_Close_Call) Run(run func()) *MockStorageBackend_Close_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run()
+	})
+	return _c
+}
+
+// Return fixes the error Close will report.
+func (_c *MockStorageBackend_Close_Call) Return(_a0 error) *MockStorageBackend_Close_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+// RunAndReturn lets a callback supply Close's return value.
+func (_c *MockStorageBackend_Close_Call) RunAndReturn(run func() error) *MockStorageBackend_Close_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// UpdateConfig provides a mock function with given fields: config
+func (_m *MockStorageBackend) UpdateConfig(config StorageConfig) (bool, error) {
+	ret := _m.Called(config)
+
+	// A call without a configured return value is a test-setup bug.
+	if len(ret) == 0 {
+		panic("no return value specified for UpdateConfig")
+	}
+
+	var r0 bool
+	var r1 error
+	// Preferred: a single function returning both values.
+	if rf, ok := ret.Get(0).(func(StorageConfig) (bool, error)); ok {
+		return rf(config)
+	}
+	// Otherwise resolve each return value independently.
+	if rf, ok := ret.Get(0).(func(StorageConfig) bool); ok {
+		r0 = rf(config)
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	if rf, ok := ret.Get(1).(func(StorageConfig) error); ok {
+		r1 = rf(config)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// MockStorageBackend_UpdateConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateConfig'
+type MockStorageBackend_UpdateConfig_Call struct {
+	*mock.Call
+}
+
+// UpdateConfig is a helper method to define mock.On call
+// - config StorageConfig
+func (_e *MockStorageBackend_Expecter) UpdateConfig(config interface{}) *MockStorageBackend_UpdateConfig_Call {
+	return &MockStorageBackend_UpdateConfig_Call{Call: _e.mock.On("UpdateConfig", config)}
+}
+
+// Run registers a callback invoked with the call's StorageConfig argument.
+func (_c *MockStorageBackend_UpdateConfig_Call) Run(run func(config StorageConfig)) *MockStorageBackend_UpdateConfig_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(StorageConfig))
+	})
+	return _c
+}
+
+// Return fixes the values UpdateConfig will report.
+func (_c *MockStorageBackend_UpdateConfig_Call) Return(_a0 bool, _a1 error) *MockStorageBackend_UpdateConfig_Call {
+	_c.Call.Return(_a0, _a1)
+	return _c
+}
+
+// RunAndReturn lets a callback supply UpdateConfig's return values.
+func (_c *MockStorageBackend_UpdateConfig_Call) RunAndReturn(run func(StorageConfig) (bool, error)) *MockStorageBackend_UpdateConfig_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// UploadUsageData provides a mock function with given fields: ctx, _a1
+func (_m *MockStorageBackend) UploadUsageData(ctx context.Context, _a1 *types.Report) error {
+	ret := _m.Called(ctx, _a1)
+
+	// A call without a configured return value is a test-setup bug.
+	if len(ret) == 0 {
+		panic("no return value specified for UploadUsageData")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, *types.Report) error); ok {
+		r0 = rf(ctx, _a1)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// MockStorageBackend_UploadUsageData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UploadUsageData'
+type MockStorageBackend_UploadUsageData_Call struct {
+	*mock.Call
+}
+
+// UploadUsageData is a helper method to define mock.On call
+// - ctx context.Context
+// - _a1 *types.Report
+func (_e *MockStorageBackend_Expecter) UploadUsageData(ctx interface{}, _a1 interface{}) *MockStorageBackend_UploadUsageData_Call {
+	return &MockStorageBackend_UploadUsageData_Call{Call: _e.mock.On("UploadUsageData", ctx, _a1)}
+}
+
+// Run registers a callback invoked with the call's context and report.
+func (_c *MockStorageBackend_UploadUsageData_Call) Run(run func(ctx context.Context, _a1 *types.Report)) *MockStorageBackend_UploadUsageData_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(*types.Report))
+	})
+	return _c
+}
+
+// Return fixes the error UploadUsageData will report.
+func (_c *MockStorageBackend_UploadUsageData_Call) Return(_a0 error) *MockStorageBackend_UploadUsageData_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+// RunAndReturn lets a callback supply UploadUsageData's return value.
+func (_c *MockStorageBackend_UploadUsageData_Call) RunAndReturn(run func(context.Context, *types.Report) error) *MockStorageBackend_UploadUsageData_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// VerifyPermissions provides a mock function with given fields: ctx
+func (_m *MockStorageBackend) VerifyPermissions(ctx context.Context) error {
+	ret := _m.Called(ctx)
+
+	// A call without a configured return value is a test-setup bug.
+	if len(ret) == 0 {
+		panic("no return value specified for VerifyPermissions")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+		r0 = rf(ctx)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// MockStorageBackend_VerifyPermissions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VerifyPermissions'
+type MockStorageBackend_VerifyPermissions_Call struct {
+	*mock.Call
+}
+
+// VerifyPermissions is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *MockStorageBackend_Expecter) VerifyPermissions(ctx interface{}) *MockStorageBackend_VerifyPermissions_Call {
+	return &MockStorageBackend_VerifyPermissions_Call{Call: _e.mock.On("VerifyPermissions", ctx)}
+}
+
+// Run registers a callback invoked with the call's context.
+func (_c *MockStorageBackend_VerifyPermissions_Call) Run(run func(ctx context.Context)) *MockStorageBackend_VerifyPermissions_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context))
+	})
+	return _c
+}
+
+// Return fixes the error VerifyPermissions will report.
+func (_c *MockStorageBackend_VerifyPermissions_Call) Return(_a0 error) *MockStorageBackend_VerifyPermissions_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+// RunAndReturn lets a callback supply VerifyPermissions's return value.
+func (_c *MockStorageBackend_VerifyPermissions_Call) RunAndReturn(run func(context.Context) error) *MockStorageBackend_VerifyPermissions_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// NewMockStorageBackend creates a new instance of MockStorageBackend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockStorageBackend(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *MockStorageBackend {
+	// NOTE: this local variable shadows the imported mock package inside
+	// this function body.
+	mock := &MockStorageBackend{}
+	mock.Mock.Test(t)
+
+	// Automatically verify all registered expectations when the test ends.
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/storage.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..b05b7a75f8ff9f5fdd43d8f4312961707edd327b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/usage/storage.go
@@ -0,0 +1,42 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package usage
+
+import (
+ "context"
+
+ "github.com/weaviate/weaviate/cluster/usage/types"
+)
+
+// StorageConfig contains common configuration fields for storage backends
+type StorageConfig struct {
+	Bucket            string // destination bucket name
+	Prefix            string // optional object-key prefix; may be empty
+	NodeID            string // identifier of the reporting node
+	Version           string // report/policy version string (e.g. "2025-06-01") — TODO confirm semantics against callers
+	VerifyPermissions bool   // whether the backend should verify access permissions
+}
+
+// StorageBackend defines the interface that storage implementations must implement
+type StorageBackend interface {
+	// VerifyPermissions checks if the backend can access the storage location
+	VerifyPermissions(ctx context.Context) error
+
+	// UploadUsageData uploads the usage data to the storage backend
+	UploadUsageData(ctx context.Context, usage *types.Report) error
+
+	// Close cleans up any resources used by the storage backend
+	Close() error
+
+	// UpdateConfig updates the backend configuration from the provided config.
+	// The boolean result reports whether any field actually changed.
+	UpdateConfig(config StorageConfig) (bool, error) // returns true if any changes were made
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d9b28f7017d55ee3d4c8557d03c5053dd783138
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts.go
@@ -0,0 +1,200 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package vectorizer
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ entcfg "github.com/weaviate/weaviate/entities/config"
+
+ "github.com/weaviate/weaviate/entities/models"
+ "github.com/weaviate/weaviate/entities/moduletools"
+
+ "github.com/fatih/camelcase"
+)
+
// ClassSettings exposes the per-class vectorizer configuration that
// ObjectVectorizer consults when deciding which text enters the corpus.
type ClassSettings interface {
	// PropertyIndexed reports whether the named property should be vectorized at all.
	PropertyIndexed(property string) bool
	// VectorizePropertyName reports whether the property's name is prepended
	// to its value in the corpus.
	VectorizePropertyName(propertyName string) bool
	// VectorizeClassName reports whether the class name is included in the corpus.
	VectorizeClassName() bool
	// Properties returns the explicitly configured source properties; a
	// non-empty result enables vectorization of non-text property types.
	Properties() []string
	// LowerCaseInput reports whether all vectorization input is lowercased.
	LowerCaseInput() bool
}
+
// ObjectVectorizer turns a Weaviate object into the text corpus sent to a
// vectorizer module. It carries no state, so a single instance may be shared.
type ObjectVectorizer struct{}

// New returns a ready-to-use ObjectVectorizer.
func New() *ObjectVectorizer {
	return &ObjectVectorizer{}
}
+
+func (v *ObjectVectorizer) Texts(ctx context.Context, object *models.Object, icheck ClassSettings,
+) string {
+ text, _ := v.TextsWithTitleProperty(ctx, object, icheck, "")
+ return text
+}
+
+func (v *ObjectVectorizer) separateCamelCase(in string, toLower bool) string {
+ parts := camelcase.Split(in)
+ var sb strings.Builder
+ for i, part := range parts {
+ if part == " " {
+ continue
+ }
+
+ if i > 0 {
+ sb.WriteString(" ")
+ }
+
+ if toLower {
+ part = strings.ToLower(part)
+ }
+
+ sb.WriteString(part)
+ }
+
+ return sb.String()
+}
+
// TextsWithTitleProperty builds the vectorization corpus for object and,
// separately, the concatenated value of the property named titlePropertyName.
// Text-typed properties (string and []string) always contribute; all other
// types (numbers, booleans, dates, objects and their array forms) contribute
// only when explicit source properties are configured. If nothing contributes,
// the class name is used as a fallback. Both return values are the collected
// fragments joined with a single space.
func (v *ObjectVectorizer) TextsWithTitleProperty(ctx context.Context, object *models.Object, icheck ClassSettings, titlePropertyName string,
) (string, string) {
	var corpi []string
	var titlePropertyValue []string

	// Lowercasing comes from the class settings, but the
	// LOWERCASE_VECTORIZATION_INPUT env var can force it on globally.
	toLowerCase := icheck.LowerCaseInput()
	if entcfg.Enabled(os.Getenv("LOWERCASE_VECTORIZATION_INPUT")) {
		toLowerCase = true
	}

	if icheck.VectorizeClassName() {
		corpi = append(corpi, v.separateCamelCase(object.Class, toLowerCase))
	}
	if object.Properties != nil {
		// NOTE(review): assumes Properties always holds a map[string]interface{};
		// any other concrete type would panic here — confirm with callers.
		propMap := object.Properties.(map[string]interface{})
		// Iterate in sorted key order so the corpus is deterministic.
		for _, propName := range moduletools.SortStringKeys(propMap) {
			if !icheck.PropertyIndexed(propName) {
				continue
			}
			isTitleProperty := propName == titlePropertyName
			isPropertyNameVectorizable := icheck.VectorizePropertyName(propName)
			hasSourceProperties := len(icheck.Properties()) > 0

			// First pass: text-typed values are always vectorized.
			switch val := propMap[propName].(type) {
			case []string:
				for i := range val {
					corpi, titlePropertyValue = v.insertValue(val[i], propName,
						toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
				}
			case string:
				corpi, titlePropertyValue = v.insertValue(val, propName,
					toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
			default:
				// do nothing
			}

			if hasSourceProperties {
				// get the values from additional property types only if source properties are present
				// (string/[]string were already handled above and fall through
				// to this switch's default, so nothing is inserted twice)
				switch val := propMap[propName].(type) {
				case bool, int, int16, int32, int64, float32, float64:
					corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val), propName,
						toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
				case json.Number:
					corpi, titlePropertyValue = v.insertValue(val.String(), propName,
						toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
				case time.Time:
					corpi, titlePropertyValue = v.insertValue(val.Format(time.RFC3339), propName,
						toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
				case []any:
					if len(val) > 0 {
						// A slice of objects is marshalled to JSON as a whole;
						// a slice of scalars is inserted element by element.
						if _, ok := val[0].(map[string]any); ok {
							in := v.marshalValue(val)
							corpi, titlePropertyValue = v.insertValue(in, propName,
								toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
						} else {
							for i := range val {
								corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val[i]), propName,
									toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
							}
						}
					}
				case []float64:
					for i := range val {
						corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val[i]), propName,
							toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
					}
				case []int:
					for i := range val {
						corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val[i]), propName,
							toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
					}
				case []int64:
					for i := range val {
						corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val[i]), propName,
							toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
					}
				case []bool:
					for i := range val {
						corpi, titlePropertyValue = v.insertValue(fmt.Sprintf("%v", val[i]), propName,
							toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
					}
				case map[string]any, []map[string]any:
					in := v.marshalValue(val)
					corpi, titlePropertyValue = v.insertValue(in, propName,
						toLowerCase, isPropertyNameVectorizable, isTitleProperty, corpi, titlePropertyValue)
				default:
					// get the values from additional property types only if source properties are present
				}
			}
		}
	}
	if len(corpi) == 0 {
		// fall back to using the class name
		corpi = append(corpi, v.separateCamelCase(object.Class, toLowerCase))
	}

	return strings.Join(corpi, " "), strings.Join(titlePropertyValue, " ")
}
+
+func (v *ObjectVectorizer) insertValue(
+ val, propName string,
+ toLowerCase, isPropertyNameVectorizable, isTitleProperty bool,
+ corpi, titlePropertyValue []string,
+) ([]string, []string) {
+ val = v.getValue(val, propName, toLowerCase, isPropertyNameVectorizable)
+ if isTitleProperty {
+ titlePropertyValue = append(titlePropertyValue, val)
+ } else {
+ corpi = append(corpi, val)
+ }
+ return corpi, titlePropertyValue
+}
+
+func (v *ObjectVectorizer) getValue(val, propName string, toLowerCase, isPropertyNameVectorizable bool) string {
+ if toLowerCase {
+ val = strings.ToLower(val)
+ }
+ if isPropertyNameVectorizable {
+ val = fmt.Sprintf("%s %s", v.separateCamelCase(propName, toLowerCase), val)
+ }
+ return val
+}
+
+func (v *ObjectVectorizer) marshalValue(in any) string {
+ if val, err := json.Marshal(in); err == nil {
+ return string(val)
+ }
+ return fmt.Sprintf("%v", in)
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f4632ef36f8d290c4d63ece1ee6fa0c596cf41b
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/object_texts_test.go
@@ -0,0 +1,229 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package vectorizer
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaviate/weaviate/entities/models"
+ basesettings "github.com/weaviate/weaviate/usecases/modulecomponents/settings"
+ "github.com/weaviate/weaviate/usecases/modules"
+)
+
+func TestVectorizingObjects_AllPropertyTypes(t *testing.T) {
+ className := "TestClass"
+ var nilAnyArray []any
+ asTime := func(date string) time.Time {
+ if asTime, err := time.Parse(time.RFC3339, date); err == nil {
+ return asTime
+ }
+ // fallback to current time, this will surely fail tests
+ return time.Now()
+ }
+ tests := []struct {
+ name string
+ object *models.Object
+ vectorizableProperties []string
+ lowerCaseInput bool
+ titlePropertyName string
+ wantCorpi string
+ weantTitlePropertyValue string
+ }{
+ {
+ name: "empty properties",
+ object: &models.Object{Class: className},
+ wantCorpi: "Test Class",
+ },
+ {
+ name: "nil property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "nil_prop": nilAnyArray,
+ }},
+ vectorizableProperties: []string{"nil_prop"},
+ wantCorpi: "Test Class",
+ },
+ {
+ name: "explicit nil property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "nil_prop": nil,
+ }},
+ vectorizableProperties: []string{"nil_prop"},
+ wantCorpi: "Test Class",
+ },
+ {
+ name: "string property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "string_prop": "value of string property with it's OWN Casing",
+ }},
+ vectorizableProperties: []string{"string_prop"},
+ wantCorpi: "value of string property with it's OWN Casing",
+ },
+ {
+ name: "string array property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "string_array": []string{"value", "FROM", "String", "Property"},
+ }},
+ vectorizableProperties: []string{"string_array"},
+ wantCorpi: "value FROM String Property",
+ },
+ {
+ name: "string and string array property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "a_string_prop": "value of string property with it's OWN Casing",
+ "string_array": []string{"value", "FROM", "String", "Property"},
+ }},
+ vectorizableProperties: []string{"a_string_prop", "string_array"},
+ wantCorpi: "value of string property with it's OWN Casing value FROM String Property",
+ },
+ {
+ name: "int array property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "int_array_prop": []int64{1, 2, 3},
+ }},
+ vectorizableProperties: []string{"int_array_prop"},
+ wantCorpi: "1 2 3",
+ },
+ {
+ name: "int array property as []int{}",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "int_array_prop": []int{1, 2, 3},
+ }},
+ vectorizableProperties: []string{"int_array_prop"},
+ wantCorpi: "1 2 3",
+ },
+ {
+ name: "int array property as []any{int64}",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "int_array_prop": []any{int64(1), int64(2), int64(3)},
+ }},
+ vectorizableProperties: []string{"int_array_prop"},
+ wantCorpi: "1 2 3",
+ },
+ {
+ name: "number property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "number_prop": float64(1.1),
+ }},
+ vectorizableProperties: []string{"number_prop"},
+ wantCorpi: "1.1",
+ },
+ {
+ name: "number property as json.Number",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "number_prop": json.Number("1.1"),
+ }},
+ vectorizableProperties: []string{"number_prop"},
+ wantCorpi: "1.1",
+ },
+ {
+ name: "number array property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "number_array_prop": []float64{1.1, 2.00002, 3},
+ }},
+ vectorizableProperties: []string{"number_array_prop"},
+ wantCorpi: "1.1 2.00002 3",
+ },
+ {
+ name: "number array property as []any{float64}",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "number_array_prop": []any{float64(1.1), float64(2.00002), float64(3)},
+ }},
+ vectorizableProperties: []string{"number_array_prop"},
+ wantCorpi: "1.1 2.00002 3",
+ },
+ {
+ name: "object array property as []any{map[string]any}",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "object_array": []any{map[string]any{"name": "something"}, map[string]any{"name": "something else"}},
+ }},
+ vectorizableProperties: []string{"object_array"},
+ wantCorpi: `[{"name":"something"},{"name":"something else"}]`,
+ },
+ {
+ name: "object array property as []map[string]any",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "object_array": []map[string]any{{"name": "something"}, {"name": "something else"}},
+ }},
+ vectorizableProperties: []string{"object_array"},
+ wantCorpi: `[{"name":"something"},{"name":"something else"}]`,
+ },
+ {
+ name: "object property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "simple_obj": map[string]any{"name": map[string]any{"name": "something else"}},
+ }},
+ vectorizableProperties: []string{"simple_obj"},
+ wantCorpi: `{"name":{"name":"something else"}}`,
+ },
+ {
+ name: "boolean property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "boolean_prop": true,
+ }},
+ vectorizableProperties: []string{"boolean_prop"},
+ wantCorpi: "true",
+ },
+ {
+ name: "boolean array property",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "boolean_array_prop": []bool{false, true, true},
+ }},
+ vectorizableProperties: []string{"boolean_array_prop"},
+ wantCorpi: "false true true",
+ },
+ {
+ name: "boolean array property as []any{bool}",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "boolean_array_prop": []any{false, true, true},
+ }},
+ vectorizableProperties: []string{"boolean_array_prop"},
+ wantCorpi: "false true true",
+ },
+ {
+ name: "date property as time.Time",
+ object: &models.Object{Class: className, Properties: map[string]any{
+ "date_prop": asTime("2011-05-05T07:16:30+02:00"),
+ }},
+ vectorizableProperties: []string{"date_prop"},
+ wantCorpi: "2011-05-05T07:16:30+02:00",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ targetVector := "targetVector"
+ class := &models.Class{
+ Class: className,
+ VectorConfig: map[string]models.VectorConfig{
+ targetVector: {
+ Vectorizer: map[string]any{
+ "my-module": map[string]any{
+ "vectorizeClassName": false,
+ "properties": tt.vectorizableProperties,
+ },
+ },
+ VectorIndexType: "hnsw",
+ },
+ },
+ }
+ cfg := modules.NewClassBasedModuleConfig(class, "my-module", "tenant", targetVector, nil)
+ icheck := basesettings.NewBaseClassSettings(cfg, tt.lowerCaseInput)
+ v := &ObjectVectorizer{}
+ corpi, titlePropertyValue := v.TextsWithTitleProperty(context.TODO(), tt.object, icheck, tt.titlePropertyName)
+ assert.Equal(t, tt.wantCorpi, corpi)
+ assert.Equal(t, tt.weantTitlePropertyValue, titlePropertyValue)
+ })
+ }
+}
diff --git a/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/objects_test.go b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/objects_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfa434616aff43e5581f91517fac2e1bcdc19429
--- /dev/null
+++ b/platform/dbops/binaries/weaviate-src/usecases/modulecomponents/vectorizer/objects_test.go
@@ -0,0 +1,289 @@
+// _ _
+// __ _____ __ ___ ___ __ _| |_ ___
+// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
+// \ V V / __/ (_| |\ V /| | (_| | || __/
+// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
+//
+// Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
+//
+// CONTACT: hello@weaviate.io
+//
+
+package vectorizer
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaviate/weaviate/entities/models"
+)
+
+func TestVectorizingObjects(t *testing.T) {
+ type testCase struct {
+ name string
+ input *models.Object
+ expectedClientCall string
+ expectedOpenAIType string
+ expectedOpenAIModel string
+ noindex string
+ excludedProperty string // to simulate a schema where property names aren't vectorized
+ excludedClass string // to simulate a schema where class names aren't vectorized
+ openAIType string
+ openAIModel string
+ openAIModelVersion string
+ lowerCase bool
+ }
+
+ tests := []testCase{
+ {
+ name: "empty object",
+ input: &models.Object{
+ Class: "Car",
+ },
+ openAIType: "text",
+ openAIModel: "ada",
+ expectedOpenAIType: "text",
+ expectedOpenAIModel: "ada",
+ expectedClientCall: "car",
+ lowerCase: true,
+ },
+ {
+ name: "object with one string prop",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "brand": "Mercedes",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car brand mercedes",
+ },
+ {
+ name: "object with one non-string prop",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "power": 300,
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car",
+ },
+ {
+ name: "object with a mix of props",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "brand": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car brand best brand review a very great car",
+ },
+ {
+ name: "with a noindexed property",
+ noindex: "review",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "brand": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car brand best brand",
+ },
+ {
+ name: "with the class name not vectorized",
+ excludedClass: "Car",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "brand": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "brand best brand review a very great car",
+ },
+ {
+ name: "with a property name not vectorized",
+ excludedProperty: "review",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "brand": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car brand best brand a very great car",
+ },
+ {
+ name: "with no schema labels vectorized",
+ excludedProperty: "review",
+ excludedClass: "Car",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "a very great car",
+ },
+ {
+ name: "with string/text arrays without propname or classname",
+ excludedProperty: "reviews",
+ excludedClass: "Car",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "reviews": []string{
+ "a very great car",
+ "you should consider buying one",
+ },
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "a very great car you should consider buying one",
+ },
+ {
+ name: "with string/text arrays with propname and classname",
+ input: &models.Object{
+ Class: "Car",
+ Properties: map[string]interface{}{
+ "reviews": []string{
+ "a very great car",
+ "you should consider buying one",
+ },
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "car reviews a very great car reviews you should consider buying one",
+ },
+ {
+ name: "with compound class and prop names",
+ input: &models.Object{
+ Class: "SuperCar",
+ Properties: map[string]interface{}{
+ "brandOfTheCar": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: true,
+ expectedClientCall: "super car brand of the car best brand review a very great car",
+ },
+ {
+ name: "with compound class and prop names",
+ input: &models.Object{
+ Class: "SuperCar",
+ Properties: map[string]interface{}{
+ "brandOfTheCar": "best brand",
+ "power": 300,
+ "review": "a very great car",
+ },
+ },
+ lowerCase: false,
+ expectedClientCall: "Super Car brand Of The Car best brand review a very great car",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ v := New()
+
+ cfg := &fakeClassConfig{
+ classConfig: map[string]interface{}{
+ "vectorizeClassName": test.excludedClass != "Car",
+ "type": test.openAIType,
+ "model": test.openAIModel,
+ "modelVersion": test.openAIModelVersion,
+ },
+ vectorizePropertyName: true,
+ skippedProperty: test.noindex,
+ excludedProperty: test.excludedProperty,
+ lowerCase: test.lowerCase,
+ }
+ text := v.Texts(context.Background(), test.input, cfg)
+ assert.Equal(t, test.expectedClientCall, text)
+ })
+ }
+}
+
// fakeClassConfig is a test double for the per-class module configuration.
// It implements the vectorizer ClassSettings interface and — presumably —
// the moduletools class-config interface (Class/Property/Tenant/TargetVector);
// confirm against moduletools when changing it.
type fakeClassConfig struct {
	// classConfig is returned verbatim from Class and ClassByModuleName.
	classConfig map[string]interface{}
	// vectorizePropertyName is the default answer for VectorizePropertyName.
	vectorizePropertyName bool
	// skippedProperty is reported as not indexed (Property returns skip: true).
	skippedProperty string
	// excludedProperty never has its property name vectorized.
	excludedProperty string
	// lowerCase is returned from LowerCaseInput.
	lowerCase bool
}
+
// LowerCaseInput reports whether vectorization input should be lowercased.
func (f fakeClassConfig) LowerCaseInput() bool {
	return f.lowerCase
}
+
// Class returns the class-level module configuration verbatim.
func (f fakeClassConfig) Class() map[string]interface{} {
	return f.classConfig
}
+
// ClassByModuleName returns the same config regardless of the module name;
// the argument is deliberately ignored in this fake.
func (f fakeClassConfig) ClassByModuleName(moduleName string) map[string]interface{} {
	return f.classConfig
}
+
+func (f fakeClassConfig) Property(propName string) map[string]interface{} {
+ if propName == f.skippedProperty {
+ return map[string]interface{}{
+ "skip": true,
+ }
+ }
+ if propName == f.excludedProperty {
+ return map[string]interface{}{
+ "vectorizePropertyName": false,
+ }
+ }
+ if f.vectorizePropertyName {
+ return map[string]interface{}{
+ "vectorizePropertyName": true,
+ }
+ }
+ return nil
+}
+
// Tenant returns an empty tenant name; multi-tenancy is not exercised here.
func (f fakeClassConfig) Tenant() string {
	return ""
}
+
// TargetVector returns an empty target vector name (the default vector).
func (f fakeClassConfig) TargetVector() string {
	return ""
}
+
// PropertyIndexed reports whether a property is indexed; only the configured
// skippedProperty is treated as not indexed.
func (f fakeClassConfig) PropertyIndexed(property string) bool {
	return f.skippedProperty != property
}
+
+func (f fakeClassConfig) VectorizePropertyName(property string) bool {
+ if f.excludedProperty == property {
+ return false
+ }
+ return f.vectorizePropertyName
+}
+
+func (f fakeClassConfig) VectorizeClassName() bool {
+ vectorizeClassName, ok := f.classConfig["vectorizeClassName"]
+ if !ok {
+ return false
+ }
+ return vectorizeClassName.(bool)
+}
+
// Properties returns the configured source properties; this fake always
// reports none (nil).
func (f fakeClassConfig) Properties() []string {
	return nil
}