ADAPT-Chase commited on
Commit
5ed9edf
·
verified ·
1 Parent(s): 5dd7e60

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run.go +234 -0
  2. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_knn.go +62 -0
  3. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_worker.go +158 -0
  4. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_zeroshot.go +75 -0
  5. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_test.go +619 -0
  6. platform/dbops/binaries/weaviate-src/usecases/classification/classifier_vector_repo.go +50 -0
  7. platform/dbops/binaries/weaviate-src/usecases/classification/fakes_for_test.go +477 -0
  8. platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta.go +55 -0
  9. platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta_test.go +98 -0
  10. platform/dbops/binaries/weaviate-src/usecases/classification/schema_for_test.go +219 -0
  11. platform/dbops/binaries/weaviate-src/usecases/classification/transactions.go +48 -0
  12. platform/dbops/binaries/weaviate-src/usecases/classification/validation.go +198 -0
  13. platform/dbops/binaries/weaviate-src/usecases/classification/validation_test.go +191 -0
  14. platform/dbops/binaries/weaviate-src/usecases/classification/writer.go +141 -0
  15. platform/dbops/binaries/weaviate-src/usecases/classification/writer_test.go +111 -0
  16. platform/dbops/binaries/weaviate-src/usecases/cluster/delegate.go +320 -0
  17. platform/dbops/binaries/weaviate-src/usecases/cluster/delegate_test.go +313 -0
  18. platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_unix.go +31 -0
  19. platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_windows.go +38 -0
  20. platform/dbops/binaries/weaviate-src/usecases/cluster/ideal_node_list.go +97 -0
  21. platform/dbops/binaries/weaviate-src/usecases/cluster/iterator.go +73 -0
  22. platform/dbops/binaries/weaviate-src/usecases/cluster/iterator_test.go +55 -0
  23. platform/dbops/binaries/weaviate-src/usecases/cluster/log_workaround.go +53 -0
  24. platform/dbops/binaries/weaviate-src/usecases/cluster/mock_node_selector.go +435 -0
  25. platform/dbops/binaries/weaviate-src/usecases/cluster/state.go +460 -0
  26. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast.go +184 -0
  27. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast_test.go +228 -0
  28. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_read.go +69 -0
  29. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_slowlog.go +161 -0
  30. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_test.go +570 -0
  31. platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_write.go +660 -0
  32. platform/dbops/binaries/weaviate-src/usecases/config/authentication.go +83 -0
  33. platform/dbops/binaries/weaviate-src/usecases/config/authentication_test.go +98 -0
  34. platform/dbops/binaries/weaviate-src/usecases/config/authorization.go +48 -0
  35. platform/dbops/binaries/weaviate-src/usecases/config/authorization_test.go +64 -0
  36. platform/dbops/binaries/weaviate-src/usecases/config/auto_schema_test.go +65 -0
  37. platform/dbops/binaries/weaviate-src/usecases/config/config_handler.go +752 -0
  38. platform/dbops/binaries/weaviate-src/usecases/config/config_handler_test.go +294 -0
  39. platform/dbops/binaries/weaviate-src/usecases/config/environment.go +1629 -0
  40. platform/dbops/binaries/weaviate-src/usecases/config/environment_test.go +1293 -0
  41. platform/dbops/binaries/weaviate-src/usecases/config/helpers_for_test.go +28 -0
  42. platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings.go +64 -0
  43. platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings_test.go +55 -0
  44. platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig.go +263 -0
  45. platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig_test.go +345 -0
  46. platform/dbops/binaries/weaviate-src/usecases/connstate/manager.go +89 -0
  47. platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler.go +81 -0
  48. platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler_test.go +85 -0
  49. platform/dbops/binaries/weaviate-src/usecases/fakes/fake_cluster_state.go +75 -0
  50. platform/dbops/binaries/weaviate-src/usecases/fakes/fake_raft_address_resolver.go +25 -0
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run.go ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "runtime"
18
+ "time"
19
+
20
+ "github.com/go-openapi/strfmt"
21
+ "github.com/pkg/errors"
22
+ "github.com/sirupsen/logrus"
23
+ "github.com/weaviate/weaviate/entities/additional"
24
+ "github.com/weaviate/weaviate/entities/models"
25
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
26
+ "github.com/weaviate/weaviate/entities/search"
27
+ )
28
+
29
+ // the contents of this file deal with anything about a classification run
30
+ // which is generic, whereas the individual classify_item fns can be found in
31
+ // the respective files such as classifier_run_knn.go
32
+
33
+ func (c *Classifier) run(params models.Classification,
34
+ filters Filters,
35
+ ) {
36
+ ctx, cancel := contextWithTimeout(30 * time.Minute)
37
+ defer cancel()
38
+
39
+ go c.monitorClassification(ctx, cancel, params.Class)
40
+
41
+ c.logBegin(params, filters)
42
+ unclassifiedItems, err := c.vectorRepo.GetUnclassified(ctx,
43
+ params.Class, params.ClassifyProperties, params.BasedOnProperties, filters.Source())
44
+ if err != nil {
45
+ c.failRunWithError(params, errors.Wrap(err, "retrieve to-be-classifieds"))
46
+ return
47
+ }
48
+
49
+ if len(unclassifiedItems) == 0 {
50
+ c.failRunWithError(params,
51
+ fmt.Errorf("no classes to be classified - did you run a previous classification already?"))
52
+ return
53
+ }
54
+ c.logItemsFetched(params, unclassifiedItems)
55
+
56
+ classifyItem, err := c.prepareRun(params, filters, unclassifiedItems)
57
+ if err != nil {
58
+ c.failRunWithError(params, errors.Wrap(err, "prepare classification"))
59
+ return
60
+ }
61
+
62
+ params, err = c.runItems(ctx, classifyItem, params, filters, unclassifiedItems)
63
+ if err != nil {
64
+ c.failRunWithError(params, err)
65
+ return
66
+ }
67
+
68
+ c.succeedRun(params)
69
+ }
70
+
71
+ func (c *Classifier) monitorClassification(ctx context.Context, cancelFn context.CancelFunc, className string) {
72
+ ticker := time.NewTicker(100 * time.Millisecond)
73
+ defer ticker.Stop()
74
+ for {
75
+ select {
76
+ case <-ctx.Done():
77
+ return
78
+ case <-ticker.C:
79
+ class := c.schemaGetter.ReadOnlyClass(className)
80
+ if class == nil {
81
+ cancelFn()
82
+ return
83
+ }
84
+ }
85
+ }
86
+ }
87
+
88
+ func (c *Classifier) prepareRun(params models.Classification, filters Filters,
89
+ unclassifiedItems []search.Result,
90
+ ) (ClassifyItemFn, error) {
91
+ c.logBeginPreparation(params)
92
+ defer c.logFinishPreparation(params)
93
+
94
+ if params.Type == "knn" {
95
+ return c.classifyItemUsingKNN, nil
96
+ }
97
+
98
+ if params.Type == "zeroshot" {
99
+ return c.classifyItemUsingZeroShot, nil
100
+ }
101
+
102
+ if c.modulesProvider != nil {
103
+ classifyItemFn, err := c.modulesProvider.GetClassificationFn(params.Class, params.Type,
104
+ c.getClassifyParams(params, filters, unclassifiedItems))
105
+ if err != nil {
106
+ return nil, errors.Wrapf(err, "cannot classify")
107
+ }
108
+ if classifyItemFn == nil {
109
+ return nil, errors.Errorf("cannot classify: empty classifier for %s", params.Type)
110
+ }
111
+ classification := &moduleClassification{classifyItemFn}
112
+ return classification.classifyFn, nil
113
+ }
114
+
115
+ return nil, errors.Errorf("unsupported type '%s', have no classify item fn for this", params.Type)
116
+ }
117
+
118
// getClassifyParams bundles everything a module-provided classifier needs
// to run: schema access, the user-supplied params and filters, the items
// to classify, and a search-capable vector repo.
func (c *Classifier) getClassifyParams(params models.Classification,
	filters Filters, unclassifiedItems []search.Result,
) modulecapabilities.ClassifyParams {
	return modulecapabilities.ClassifyParams{
		GetClass:          c.schemaGetter.ReadOnlyClass,
		Params:            params,
		Filters:           filters,
		UnclassifiedItems: unclassifiedItems,
		VectorRepo:        c.vectorClassSearchRepo,
	}
}
129
+
130
+ // runItems splits the job list into batches that can be worked on parallelly
131
+ // depending on the available CPUs
132
+ func (c *Classifier) runItems(ctx context.Context, classifyItem ClassifyItemFn, params models.Classification, filters Filters,
133
+ items []search.Result,
134
+ ) (models.Classification, error) {
135
+ workerCount := runtime.GOMAXPROCS(0)
136
+ if len(items) < workerCount {
137
+ workerCount = len(items)
138
+ }
139
+
140
+ workers := newRunWorkers(workerCount, classifyItem, params, filters, c.vectorRepo, c.logger)
141
+ workers.addJobs(items)
142
+ res := workers.work(ctx)
143
+
144
+ params.Meta.Completed = strfmt.DateTime(time.Now())
145
+ params.Meta.CountSucceeded = res.successCount
146
+ params.Meta.CountFailed = res.errorCount
147
+ params.Meta.Count = res.successCount + res.errorCount
148
+
149
+ return params, res.err
150
+ }
151
+
152
+ func (c *Classifier) succeedRun(params models.Classification) {
153
+ params.Status = models.ClassificationStatusCompleted
154
+ ctx, cancel := contextWithTimeout(2 * time.Second)
155
+ defer cancel()
156
+ err := c.repo.Put(ctx, params)
157
+ if err != nil {
158
+ c.logExecutionError("store succeeded run", err, params)
159
+ }
160
+ c.logFinish(params)
161
+ }
162
+
163
+ func (c *Classifier) failRunWithError(params models.Classification, err error) {
164
+ params.Status = models.ClassificationStatusFailed
165
+ params.Error = fmt.Sprintf("classification failed: %v", err)
166
+ err = c.repo.Put(context.Background(), params)
167
+ if err != nil {
168
+ c.logExecutionError("store failed run", err, params)
169
+ }
170
+ c.logFinish(params)
171
+ }
172
+
173
+ func (c *Classifier) extendItemWithObjectMeta(item *search.Result,
174
+ params models.Classification, classified []string,
175
+ ) {
176
+ // don't overwrite existing non-classification meta info
177
+ if item.AdditionalProperties == nil {
178
+ item.AdditionalProperties = models.AdditionalProperties{}
179
+ }
180
+
181
+ item.AdditionalProperties["classification"] = additional.Classification{
182
+ ID: params.ID,
183
+ Scope: params.ClassifyProperties,
184
+ ClassifiedFields: classified,
185
+ Completed: strfmt.DateTime(time.Now()),
186
+ }
187
+ }
188
+
189
// contextWithTimeout is a convenience helper returning a fresh
// background-derived context that expires after d.
func contextWithTimeout(d time.Duration) (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), d)
}
192
+
193
+ // Logging helper methods
194
+ func (c *Classifier) logBase(params models.Classification, event string) *logrus.Entry {
195
+ return c.logger.WithField("action", "classification_run").
196
+ WithField("event", event).
197
+ WithField("params", params).
198
+ WithField("classification_type", params.Type)
199
+ }
200
+
201
// logBegin records the start of a classification run, including the
// filters it operates with.
func (c *Classifier) logBegin(params models.Classification, filters Filters) {
	c.logBase(params, "classification_begin").
		WithField("filters", filters).
		Debug("classification started")
}
206
+
207
// logFinish records the end of a classification run together with its
// final status (completed or failed).
func (c *Classifier) logFinish(params models.Classification) {
	c.logBase(params, "classification_finish").
		WithField("status", params.Status).
		Debug("classification finished")
}
212
+
213
// logItemsFetched records how many unclassified source items were
// retrieved for this run.
func (c *Classifier) logItemsFetched(params models.Classification, items search.Results) {
	c.logBase(params, "classification_items_fetched").
		WithField("status", params.Status).
		WithField("item_count", len(items)).
		Debug("fetched source items")
}
219
+
220
// logBeginPreparation records the start of the run-preparation phase.
func (c *Classifier) logBeginPreparation(params models.Classification) {
	c.logBase(params, "classification_preparation_begin").
		Debug("begin run preparation")
}
224
+
225
// logFinishPreparation records the end of the run-preparation phase.
func (c *Classifier) logFinishPreparation(params models.Classification) {
	c.logBase(params, "classification_preparation_finish").
		Debug("finish run preparation")
}
229
+
230
// logExecutionError records a failure that occurred while executing or
// persisting a classification run.
func (c *Classifier) logExecutionError(event string, err error, params models.Classification) {
	c.logBase(params, event).
		WithError(err).
		Error("classification execution failure")
}
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_knn.go ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+ "time"
17
+
18
+ "github.com/weaviate/weaviate/entities/models"
19
+ "github.com/weaviate/weaviate/entities/search"
20
+ )
21
+
22
+ func (c *Classifier) classifyItemUsingKNN(item search.Result, itemIndex int,
23
+ params models.Classification, filters Filters, writer Writer,
24
+ ) error {
25
+ ctx, cancel := contextWithTimeout(2 * time.Second)
26
+ defer cancel()
27
+
28
+ // this type assertion is safe to make, since we have passed the parsing stage
29
+ settings := params.Settings.(*ParamsKNN)
30
+
31
+ // K is guaranteed to be set by now, no danger in dereferencing the pointer
32
+ res, err := c.vectorRepo.AggregateNeighbors(ctx, item.Vector,
33
+ item.ClassName,
34
+ params.ClassifyProperties, int(*settings.K), filters.TrainingSet())
35
+ if err != nil {
36
+ return fmt.Errorf("classify %s/%s: %w", item.ClassName, item.ID, err)
37
+ }
38
+
39
+ var classified []string
40
+
41
+ for _, agg := range res {
42
+ meta := agg.Meta()
43
+ item.Schema.(map[string]interface{})[agg.Property] = models.MultipleRef{
44
+ &models.SingleRef{
45
+ Beacon: agg.Beacon,
46
+ Classification: meta,
47
+ },
48
+ }
49
+
50
+ // append list of actually classified (can differ from scope!) properties,
51
+ // so we can build the object meta information
52
+ classified = append(classified, agg.Property)
53
+ }
54
+
55
+ c.extendItemWithObjectMeta(&item, params, classified)
56
+ err = writer.Store(item)
57
+ if err != nil {
58
+ return fmt.Errorf("store %s/%s: %w", item.ClassName, item.ID, err)
59
+ }
60
+
61
+ return nil
62
+ }
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_worker.go ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "context"
16
+ "sync"
17
+ "sync/atomic"
18
+
19
+ "github.com/sirupsen/logrus"
20
+ enterrors "github.com/weaviate/weaviate/entities/errors"
21
+
22
+ "github.com/pkg/errors"
23
+ "github.com/weaviate/weaviate/entities/errorcompounder"
24
+ "github.com/weaviate/weaviate/entities/models"
25
+ "github.com/weaviate/weaviate/entities/search"
26
+ )
27
+
28
// runWorker processes its private share of the classification jobs.
// The counters and error compounder are shared across all workers of a
// pool; the job list itself is worker-local.
type runWorker struct {
	jobs         []search.Result // worker-local job slice, filled via addJob
	successCount *int64          // shared across workers, updated atomically
	errorCount   *int64          // shared across workers, updated atomically
	ec           *errorcompounder.SafeErrorCompounder
	classify     ClassifyItemFn
	batchWriter  Writer
	params       models.Classification
	filters      Filters
	id           int // worker position, used to reconstruct original job indexes
	workerCount  int
}
40
+
41
// addJob appends a single job to this worker's private job list.
func (w *runWorker) addJob(job search.Result) {
	w.jobs = append(w.jobs, job)
}
44
+
45
+ func (w *runWorker) work(ctx context.Context, wg *sync.WaitGroup) {
46
+ defer wg.Done()
47
+
48
+ for i, item := range w.jobs {
49
+ // check if the whole classification operation has been cancelled
50
+ // if yes, then abort the classifier worker
51
+ if err := ctx.Err(); err != nil {
52
+ w.ec.Add(err)
53
+ atomic.AddInt64(w.errorCount, 1)
54
+ break
55
+ }
56
+ originalIndex := (i * w.workerCount) + w.id
57
+ err := w.classify(item, originalIndex, w.params, w.filters, w.batchWriter)
58
+ if err != nil {
59
+ w.ec.Add(err)
60
+ atomic.AddInt64(w.errorCount, 1)
61
+ } else {
62
+ atomic.AddInt64(w.successCount, 1)
63
+ }
64
+ }
65
+ }
66
+
67
// newRunWorker creates a single worker wired to the shared state of the
// given pool. id and workerCount let the worker map its local job index
// back to the position in the original job list.
func newRunWorker(id int, workerCount int, rw *runWorkers) *runWorker {
	return &runWorker{
		successCount: rw.successCount,
		errorCount:   rw.errorCount,
		ec:           rw.ec,
		params:       rw.params,
		filters:      rw.filters,
		classify:     rw.classify,
		batchWriter:  rw.batchWriter,
		id:           id,
		workerCount:  workerCount,
	}
}
80
+
81
// runWorkers is a fixed-size pool of runWorker instances which share the
// success/error counters, the error compounder, and a single batch writer.
type runWorkers struct {
	workers      []*runWorker
	successCount *int64 // shared across workers, updated atomically
	errorCount   *int64 // shared across workers, updated atomically
	ec           *errorcompounder.SafeErrorCompounder
	classify     ClassifyItemFn
	params       models.Classification
	filters      Filters
	batchWriter  Writer
	logger       logrus.FieldLogger
}
92
+
93
+ func newRunWorkers(amount int, classifyFn ClassifyItemFn,
94
+ params models.Classification, filters Filters, vectorRepo vectorRepo, logger logrus.FieldLogger,
95
+ ) *runWorkers {
96
+ var successCount int64
97
+ var errorCount int64
98
+
99
+ rw := &runWorkers{
100
+ workers: make([]*runWorker, amount),
101
+ successCount: &successCount,
102
+ errorCount: &errorCount,
103
+ ec: &errorcompounder.SafeErrorCompounder{},
104
+ classify: classifyFn,
105
+ params: params,
106
+ filters: filters,
107
+ batchWriter: newBatchWriter(vectorRepo, logger),
108
+ logger: logger,
109
+ }
110
+
111
+ for i := 0; i < amount; i++ {
112
+ rw.workers[i] = newRunWorker(i, amount, rw)
113
+ }
114
+
115
+ return rw
116
+ }
117
+
118
+ func (ws *runWorkers) addJobs(jobs []search.Result) {
119
+ for i, job := range jobs {
120
+ ws.workers[i%len(ws.workers)].addJob(job)
121
+ }
122
+ }
123
+
124
+ func (ws *runWorkers) work(ctx context.Context) runWorkerResults {
125
+ ws.batchWriter.Start()
126
+
127
+ wg := &sync.WaitGroup{}
128
+ for _, worker := range ws.workers {
129
+ worker := worker
130
+ wg.Add(1)
131
+ enterrors.GoWrapper(func() { worker.work(ctx, wg) }, ws.logger)
132
+
133
+ }
134
+
135
+ wg.Wait()
136
+
137
+ res := ws.batchWriter.Stop()
138
+
139
+ if res.SuccessCount() != *ws.successCount || res.ErrorCount() != *ws.errorCount {
140
+ ws.ec.Add(errors.New("data save error"))
141
+ }
142
+
143
+ if res.Err() != nil {
144
+ ws.ec.Add(res.Err())
145
+ }
146
+
147
+ return runWorkerResults{
148
+ successCount: *ws.successCount,
149
+ errorCount: *ws.errorCount,
150
+ err: ws.ec.ToError(),
151
+ }
152
+ }
153
+
154
// runWorkerResults is the aggregated outcome of a pool run: how many
// items were classified successfully, how many failed, and the combined
// error (nil if everything succeeded).
type runWorkerResults struct {
	successCount int64
	errorCount   int64
	err          error
}
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_run_zeroshot.go ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+ "time"
17
+
18
+ "github.com/pkg/errors"
19
+ "github.com/weaviate/weaviate/entities/models"
20
+ "github.com/weaviate/weaviate/entities/schema/crossref"
21
+ "github.com/weaviate/weaviate/entities/search"
22
+ )
23
+
24
+ func (c *Classifier) classifyItemUsingZeroShot(item search.Result, itemIndex int,
25
+ params models.Classification, filters Filters, writer Writer,
26
+ ) error {
27
+ ctx, cancel := contextWithTimeout(2 * time.Second)
28
+ defer cancel()
29
+
30
+ properties := params.ClassifyProperties
31
+
32
+ class := c.schemaGetter.ReadOnlyClass(item.ClassName)
33
+ if class == nil {
34
+ return fmt.Errorf("zeroshot: search: could not find class %s in schema", item.ClassName)
35
+ }
36
+
37
+ classifyProp := []string{}
38
+ for _, prop := range properties {
39
+ for _, classProp := range class.Properties {
40
+ if classProp.Name == prop {
41
+ classifyProp = append(classifyProp, classProp.DataType...)
42
+ }
43
+ }
44
+ }
45
+
46
+ var classified []string
47
+ for _, className := range classifyProp {
48
+ for _, prop := range properties {
49
+ res, err := c.vectorRepo.ZeroShotSearch(ctx, item.Vector, className,
50
+ params.ClassifyProperties, filters.Target())
51
+ if err != nil {
52
+ return errors.Wrap(err, "zeroshot: search")
53
+ }
54
+
55
+ if len(res) > 0 {
56
+ cref := crossref.NewLocalhost(res[0].ClassName, res[0].ID)
57
+ item.Schema.(map[string]interface{})[prop] = models.MultipleRef{
58
+ &models.SingleRef{
59
+ Beacon: cref.SingleRef().Beacon,
60
+ Classification: &models.ReferenceMetaClassification{},
61
+ },
62
+ }
63
+ classified = append(classified, prop)
64
+ }
65
+ }
66
+ }
67
+
68
+ c.extendItemWithObjectMeta(&item, params, classified)
69
+ err := writer.Store(item)
70
+ if err != nil {
71
+ return errors.Errorf("store %s/%s: %v", item.ClassName, item.ID, err)
72
+ }
73
+
74
+ return nil
75
+ }
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_test.go ADDED
@@ -0,0 +1,619 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "context"
16
+ "encoding/json"
17
+ "errors"
18
+ "fmt"
19
+ "testing"
20
+ "time"
21
+
22
+ "github.com/go-openapi/strfmt"
23
+ "github.com/sirupsen/logrus"
24
+ "github.com/sirupsen/logrus/hooks/test"
25
+ "github.com/stretchr/testify/assert"
26
+ "github.com/stretchr/testify/require"
27
+
28
+ "github.com/weaviate/weaviate/entities/models"
29
+ testhelper "github.com/weaviate/weaviate/test/helper"
30
+ "github.com/weaviate/weaviate/usecases/auth/authorization/mocks"
31
+ )
32
+
33
// newNullLogger returns a logger that discards all output, keeping test
// runs quiet.
func newNullLogger() *logrus.Logger {
	log, _ := test.NewNullLogger()
	return log
}
37
+
38
// Test_Classifier_KNN exercises the knn classification end to end against
// fake repos: input validation, a successful run (verifying the refs
// written per article), a run where every aggregation fails, and a run
// with nothing left to classify.
func Test_Classifier_KNN(t *testing.T) {
	t.Run("with invalid data", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		_, err := New(sg, nil, nil, mocks.NewMockAuthorizer(), newNullLogger(), nil).
			Schedule(context.Background(), nil, models.Classification{})
		assert.NotNil(t, err, "should error with invalid user input")
	})

	var id strfmt.UUID
	// so we can reuse it for follow up requests, such as checking the status

	t.Run("with valid data", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified())
		classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil)

		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Settings: map[string]interface{}{
				"k": json.Number("1"),
			},
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		t.Run("retrieving the same classification by id", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, id, class.ID)
			assert.Equal(t, models.ClassificationStatusRunning, class.Status)
		})

		// TODO: improve by polling instead
		time.Sleep(500 * time.Millisecond)

		t.Run("status is now completed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusCompleted, class.Status)
		})

		t.Run("the classifier updated the actions with the classified references", func(t *testing.T) {
			// lock the fake repo while inspecting it; the background run
			// writes to it concurrently
			vectorRepo.Lock()
			require.Len(t, vectorRepo.db, 6)
			vectorRepo.Unlock()

			t.Run("food", func(t *testing.T) {
				idArticleFoodOne := "06a1e824-889c-4649-97f9-1ed3fa401d8e"
				idArticleFoodTwo := "6402e649-b1e0-40ea-b192-a64eab0d5e56"

				checkRef(t, vectorRepo, idArticleFoodOne, "exactCategory", idCategoryFoodAndDrink)
				checkRef(t, vectorRepo, idArticleFoodTwo, "mainCategory", idMainCategoryFoodAndDrink)
			})

			t.Run("politics", func(t *testing.T) {
				idArticlePoliticsOne := "75ba35af-6a08-40ae-b442-3bec69b355f9"
				idArticlePoliticsTwo := "f850439a-d3cd-4f17-8fbf-5a64405645cd"

				checkRef(t, vectorRepo, idArticlePoliticsOne, "exactCategory", idCategoryPolitics)
				checkRef(t, vectorRepo, idArticlePoliticsTwo, "mainCategory", idMainCategoryPoliticsAndSociety)
			})

			t.Run("society", func(t *testing.T) {
				idArticleSocietyOne := "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109"
				idArticleSocietyTwo := "069410c3-4b9e-4f68-8034-32a066cb7997"

				checkRef(t, vectorRepo, idArticleSocietyOne, "exactCategory", idCategorySociety)
				checkRef(t, vectorRepo, idArticleSocietyTwo, "mainCategory", idMainCategoryPoliticsAndSociety)
			})
		})
	})

	t.Run("when errors occur during classification", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified())
		// force every AggregateNeighbors call to fail
		vectorRepo.errorOnAggregate = errors.New("something went wrong")
		classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil)

		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Settings: map[string]interface{}{
				"k": json.Number("1"),
			},
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		waitForStatusToNoLongerBeRunning(t, classifier, id)

		t.Run("status is now failed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusFailed, class.Status)
			// worker order is nondeterministic, so check per-item messages
			// with Contains instead of one exact string
			expectedErrStrings := []string{
				"classification failed: ",
				"classify Article/75ba35af-6a08-40ae-b442-3bec69b355f9: something went wrong",
				"classify Article/f850439a-d3cd-4f17-8fbf-5a64405645cd: something went wrong",
				"classify Article/a2bbcbdc-76e1-477d-9e72-a6d2cfb50109: something went wrong",
				"classify Article/069410c3-4b9e-4f68-8034-32a066cb7997: something went wrong",
				"classify Article/06a1e824-889c-4649-97f9-1ed3fa401d8e: something went wrong",
				"classify Article/6402e649-b1e0-40ea-b192-a64eab0d5e56: something went wrong",
			}

			for _, msg := range expectedErrStrings {
				assert.Contains(t, class.Error, msg)
			}
		})
	})

	t.Run("when there is nothing to be classified", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(nil, testDataAlreadyClassified())
		classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil)

		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Settings: map[string]interface{}{
				"k": json.Number("1"),
			},
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		waitForStatusToNoLongerBeRunning(t, classifier, id)

		t.Run("status is now failed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusFailed, class.Status)
			expectedErr := "classification failed: " +
				"no classes to be classified - did you run a previous classification already?"
			assert.Equal(t, expectedErr, class.Error)
		})
	})
}
210
+
211
// Test_Classifier_Custom_Classifier covers scheduling classifications whose
// Type names a module-provided classifier: an unknown module name must fail,
// a recognized one must complete and write the expected references.
func Test_Classifier_Custom_Classifier(t *testing.T) {
	var id strfmt.UUID
	// so we can reuse it for follow up requests, such as checking the status

	t.Run("with unreconginzed custom module classifier name", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()

		vectorRepo := newFakeVectorRepoContextual(testDataToBeClassified(), testDataPossibleTargets())
		logger, _ := test.NewNullLogger()

		// vectorizer := &fakeVectorizer{words: testDataVectors()}
		modulesProvider := NewFakeModulesProvider()
		classifier := New(sg, repo, vectorRepo, authorizer, logger, modulesProvider)

		// a Type the fake modules provider does not know about
		notRecoginzedContextual := "text2vec-contextionary-custom-not-recognized"
		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Type:               notRecoginzedContextual,
		}

		t.Run("scheduling an unrecognized classification", func(t *testing.T) {
			// scheduling succeeds — the unknown classifier is only detected
			// when the async run starts
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		t.Run("retrieving the same classification by id", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, id, class.ID)
		})

		// TODO: improve by polling instead
		time.Sleep(500 * time.Millisecond)

		t.Run("status is failed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusFailed, class.Status)
			assert.Equal(t, notRecoginzedContextual, class.Type)
			assert.Contains(t, class.Error, "classifier "+notRecoginzedContextual+" not found")
		})
	})

	t.Run("with valid data", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()

		vectorRepo := newFakeVectorRepoContextual(testDataToBeClassified(), testDataPossibleTargets())
		logger, _ := test.NewNullLogger()

		modulesProvider := NewFakeModulesProvider()
		classifier := New(sg, repo, vectorRepo, authorizer, logger, modulesProvider)

		// this name is recognized by fakeModulesProvider.GetClassificationFn
		contextual := "text2vec-contextionary-custom-contextual"
		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Type:               contextual,
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		t.Run("retrieving the same classification by id", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, id, class.ID)
		})

		// TODO: improve by polling instead
		time.Sleep(500 * time.Millisecond)

		t.Run("status is now completed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusCompleted, class.Status)
		})

		t.Run("the classifier updated the actions with the classified references", func(t *testing.T) {
			// the fake repo's db map holds every batch-written object
			vectorRepo.Lock()
			require.Len(t, vectorRepo.db, 6)
			vectorRepo.Unlock()

			// expected targets mirror the mappings in NewFakeModuleClassifyFn
			t.Run("food", func(t *testing.T) {
				idArticleFoodOne := "06a1e824-889c-4649-97f9-1ed3fa401d8e"
				idArticleFoodTwo := "6402e649-b1e0-40ea-b192-a64eab0d5e56"

				checkRef(t, vectorRepo, idArticleFoodOne, "exactCategory", idCategoryFoodAndDrink)
				checkRef(t, vectorRepo, idArticleFoodTwo, "mainCategory", idMainCategoryFoodAndDrink)
			})

			t.Run("politics", func(t *testing.T) {
				idArticlePoliticsOne := "75ba35af-6a08-40ae-b442-3bec69b355f9"
				idArticlePoliticsTwo := "f850439a-d3cd-4f17-8fbf-5a64405645cd"

				checkRef(t, vectorRepo, idArticlePoliticsOne, "exactCategory", idCategoryPolitics)
				checkRef(t, vectorRepo, idArticlePoliticsTwo, "mainCategory", idMainCategoryPoliticsAndSociety)
			})

			t.Run("society", func(t *testing.T) {
				idArticleSocietyOne := "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109"
				idArticleSocietyTwo := "069410c3-4b9e-4f68-8034-32a066cb7997"

				checkRef(t, vectorRepo, idArticleSocietyOne, "exactCategory", idCategorySociety)
				checkRef(t, vectorRepo, idArticleSocietyTwo, "mainCategory", idMainCategoryPoliticsAndSociety)
			})
		})
	})

	// NOTE(review): the two subtests below use the KNN fake repo and a nil
	// modules provider, so they exercise the built-in KNN path rather than a
	// custom module classifier — confirm they belong in this test and are not
	// duplicates of the KNN test above.
	t.Run("when errors occur during classification", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified())
		vectorRepo.errorOnAggregate = errors.New("something went wrong")
		logger, _ := test.NewNullLogger()
		classifier := New(sg, repo, vectorRepo, authorizer, logger, nil)

		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Settings: map[string]interface{}{
				"k": json.Number("1"),
			},
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		waitForStatusToNoLongerBeRunning(t, classifier, id)

		t.Run("status is now failed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusFailed, class.Status)
			// one error per source object plus the overall failure prefix
			expectedErrStrings := []string{
				"classification failed: ",
				"classify Article/75ba35af-6a08-40ae-b442-3bec69b355f9: something went wrong",
				"classify Article/f850439a-d3cd-4f17-8fbf-5a64405645cd: something went wrong",
				"classify Article/a2bbcbdc-76e1-477d-9e72-a6d2cfb50109: something went wrong",
				"classify Article/069410c3-4b9e-4f68-8034-32a066cb7997: something went wrong",
				"classify Article/06a1e824-889c-4649-97f9-1ed3fa401d8e: something went wrong",
				"classify Article/6402e649-b1e0-40ea-b192-a64eab0d5e56: something went wrong",
			}
			for _, msg := range expectedErrStrings {
				assert.Contains(t, class.Error, msg)
			}
		})
	})

	t.Run("when there is nothing to be classified", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		// nil unclassified fixtures: there is nothing for the run to pick up
		vectorRepo := newFakeVectorRepoKNN(nil, testDataAlreadyClassified())
		logger, _ := test.NewNullLogger()
		classifier := New(sg, repo, vectorRepo, authorizer, logger, nil)

		params := models.Classification{
			Class:              "Article",
			BasedOnProperties:  []string{"description"},
			ClassifyProperties: []string{"exactCategory", "mainCategory"},
			Settings: map[string]interface{}{
				"k": json.Number("1"),
			},
		}

		t.Run("scheduling a classification", func(t *testing.T) {
			class, err := classifier.Schedule(context.Background(), nil, params)
			require.Nil(t, err, "should not error")
			require.NotNil(t, class)

			assert.Len(t, class.ID, 36, "an id was assigned")
			id = class.ID
		})

		waitForStatusToNoLongerBeRunning(t, classifier, id)

		t.Run("status is now failed", func(t *testing.T) {
			class, err := classifier.Get(context.Background(), nil, id)
			require.Nil(t, err)
			require.NotNil(t, class)
			assert.Equal(t, models.ClassificationStatusFailed, class.Status)
			expectedErr := "classification failed: " +
				"no classes to be classified - did you run a previous classification already?"
			assert.Equal(t, expectedErr, class.Error)
		})
	})
}
428
+
429
// Test_Classifier_WhereFilterValidation verifies that scheduling accepts
// partially-set ClassificationFilters (only source, only target, or only
// training-set where filters) for every classification type, and that the
// deprecated valueString filter form is still accepted.
func Test_Classifier_WhereFilterValidation(t *testing.T) {
	t.Run("when invalid whereFilters are received", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified())
		classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil)

		t.Run("with only one of the where filters being set", func(t *testing.T) {
			// matches everything — the filter content is irrelevant here,
			// only which filter slot is populated
			whereFilter := &models.WhereFilter{
				Path:      []string{"id"},
				Operator:  "Like",
				ValueText: ptString("*"),
			}
			testData := []struct {
				name                  string
				classificationType    string
				classificationFilters *models.ClassificationFilters
			}{
				{
					name:               "Contextual only source where filter set",
					classificationType: TypeContextual,
					classificationFilters: &models.ClassificationFilters{
						SourceWhere: whereFilter,
					},
				},
				{
					name:               "Contextual only target where filter set",
					classificationType: TypeContextual,
					classificationFilters: &models.ClassificationFilters{
						TargetWhere: whereFilter,
					},
				},
				{
					name:               "ZeroShot only source where filter set",
					classificationType: TypeZeroShot,
					classificationFilters: &models.ClassificationFilters{
						SourceWhere: whereFilter,
					},
				},
				{
					name:               "ZeroShot only target where filter set",
					classificationType: TypeZeroShot,
					classificationFilters: &models.ClassificationFilters{
						TargetWhere: whereFilter,
					},
				},
				{
					name:               "KNN only source where filter set",
					classificationType: TypeKNN,
					classificationFilters: &models.ClassificationFilters{
						SourceWhere: whereFilter,
					},
				},
				{
					name:               "KNN only training set where filter set",
					classificationType: TypeKNN,
					classificationFilters: &models.ClassificationFilters{
						TrainingSetWhere: whereFilter,
					},
				},
			}
			for _, td := range testData {
				t.Run(td.name, func(t *testing.T) {
					params := models.Classification{
						Class:              "Article",
						BasedOnProperties:  []string{"description"},
						ClassifyProperties: []string{"exactCategory", "mainCategory"},
						Settings: map[string]interface{}{
							"k": json.Number("1"),
						},
						Type:    td.classificationType,
						Filters: td.classificationFilters,
					}
					// scheduling must succeed for each filter combination
					class, err := classifier.Schedule(context.Background(), nil, params)
					assert.Nil(t, err)
					assert.NotNil(t, class)

					assert.Len(t, class.ID, 36, "an id was assigned")
					// let the async run finish before the next case reuses the repo
					waitForStatusToNoLongerBeRunning(t, classifier, class.ID)
				})
			}
		})
	})

	t.Run("[deprecated string] when valueString whereFilters are received", func(t *testing.T) {
		sg := &fakeSchemaGetter{testSchema()}
		repo := newFakeClassificationRepo()
		authorizer := mocks.NewMockAuthorizer()
		vectorRepo := newFakeVectorRepoKNN(testDataToBeClassified(), testDataAlreadyClassified())
		classifier := New(sg, repo, vectorRepo, authorizer, newNullLogger(), nil)

		validFilter := &models.WhereFilter{
			Path:      []string{"description"},
			Operator:  "Equal",
			ValueText: ptString("valueText is valid"),
		}
		// uses the deprecated ValueString field instead of ValueText
		deprecatedFilter := &models.WhereFilter{
			Path:        []string{"description"},
			Operator:    "Equal",
			ValueString: ptString("valueString is accepted"),
		}

		t.Run("with deprecated sourceFilter", func(t *testing.T) {
			params := models.Classification{
				Class:              "Article",
				BasedOnProperties:  []string{"description"},
				ClassifyProperties: []string{"exactCategory", "mainCategory"},
				Settings: map[string]interface{}{
					"k": json.Number("1"),
				},
				Filters: &models.ClassificationFilters{
					SourceWhere: deprecatedFilter,
				},
				Type: TypeContextual,
			}

			_, err := classifier.Schedule(context.Background(), nil, params)
			assert.Nil(t, err)
		})

		t.Run("with deprecated targetFilter", func(t *testing.T) {
			params := models.Classification{
				Class:              "Article",
				BasedOnProperties:  []string{"description"},
				ClassifyProperties: []string{"exactCategory", "mainCategory"},
				Settings: map[string]interface{}{
					"k": json.Number("1"),
				},
				Filters: &models.ClassificationFilters{
					SourceWhere: validFilter,
					TargetWhere: deprecatedFilter,
				},
				Type: TypeContextual,
			}

			_, err := classifier.Schedule(context.Background(), nil, params)
			assert.Nil(t, err)
		})

		t.Run("with deprecated trainingFilter", func(t *testing.T) {
			params := models.Classification{
				Class:              "Article",
				BasedOnProperties:  []string{"description"},
				ClassifyProperties: []string{"exactCategory", "mainCategory"},
				Settings: map[string]interface{}{
					"k": json.Number("1"),
				},
				Filters: &models.ClassificationFilters{
					SourceWhere:      validFilter,
					TrainingSetWhere: deprecatedFilter,
				},
				Type: TypeKNN,
			}

			_, err := classifier.Schedule(context.Background(), nil, params)
			assert.Nil(t, err)
		})
	})
}
589
+
590
// genericFakeRepo abstracts over the KNN and contextual fake vector repos so
// checkRef can inspect objects written to either one.
type genericFakeRepo interface {
	get(strfmt.UUID) (*models.Object, bool)
}
593
+
594
// checkRef asserts that the object identified by source was stored in the
// fake repo and carries exactly one cross-reference under propName whose
// beacon points at the given target id.
func checkRef(t *testing.T, repo genericFakeRepo, source, propName, target string) {
	object, ok := repo.get(strfmt.UUID(source))
	require.True(t, ok, "object must be present")

	schema, ok := object.Properties.(map[string]interface{})
	require.True(t, ok, "schema must be map")

	prop, ok := schema[propName]
	require.True(t, ok, "ref prop must be present")

	refs, ok := prop.(models.MultipleRef)
	require.True(t, ok, "ref prop must be models.MultipleRef")
	require.Len(t, refs, 1, "refs must have len 1")

	assert.Equal(t, fmt.Sprintf("weaviate://localhost/%s", target), refs[0].Beacon.String(), "beacon must match")
}
610
+
611
// waitForStatusToNoLongerBeRunning polls the classification status every
// 100ms (up to 20s) until it leaves the "running" state, so tests can assert
// on the final completed/failed status without a fixed sleep.
func waitForStatusToNoLongerBeRunning(t *testing.T, classifier *Classifier, id strfmt.UUID) {
	testhelper.AssertEventuallyEqualWithFrequencyAndTimeout(t, true, func() interface{} {
		class, err := classifier.Get(context.Background(), nil, id)
		require.Nil(t, err)
		require.NotNil(t, class)

		return class.Status != models.ClassificationStatusRunning
	}, 100*time.Millisecond, 20*time.Second, "wait until status in no longer running")
}
platform/dbops/binaries/weaviate-src/usecases/classification/classifier_vector_repo.go ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "context"
16
+
17
+ "github.com/weaviate/weaviate/entities/dto"
18
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
19
+ "github.com/weaviate/weaviate/entities/search"
20
+ )
21
+
22
// vectorClassSearchRepo adapts the classification vectorRepo to the
// modulecapabilities search interface expected by module classifiers.
type vectorClassSearchRepo struct {
	vectorRepo vectorRepo
}
25
+
26
// newVectorClassSearchRepo wraps the given vectorRepo in the adapter.
func newVectorClassSearchRepo(vectorRepo vectorRepo) *vectorClassSearchRepo {
	return &vectorClassSearchRepo{vectorRepo}
}
29
+
30
// VectorClassSearch translates module-level search params into a dto.GetParams
// vector search. Target and search vectors are passed as nil — the underlying
// repo decides how to search without an explicit query vector.
func (r *vectorClassSearchRepo) VectorClassSearch(ctx context.Context,
	params modulecapabilities.VectorClassSearchParams,
) ([]search.Result, error) {
	return r.vectorRepo.VectorSearch(ctx, dto.GetParams{
		Filters:    params.Filters,
		Pagination: params.Pagination,
		ClassName:  params.ClassName,
		Properties: r.getProperties(params.Properties),
	}, nil, nil)
}
40
+
41
+ func (r *vectorClassSearchRepo) getProperties(properties []string) search.SelectProperties {
42
+ if len(properties) > 0 {
43
+ props := search.SelectProperties{}
44
+ for i := range properties {
45
+ props = append(props, search.SelectProperty{Name: properties[i]})
46
+ }
47
+ return props
48
+ }
49
+ return nil
50
+ }
platform/dbops/binaries/weaviate-src/usecases/classification/fakes_for_test.go ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "math"
18
+ "sort"
19
+ "sync"
20
+ "time"
21
+
22
+ "github.com/go-openapi/strfmt"
23
+ "github.com/pkg/errors"
24
+ "github.com/weaviate/weaviate/entities/additional"
25
+ "github.com/weaviate/weaviate/entities/dto"
26
+ libfilters "github.com/weaviate/weaviate/entities/filters"
27
+ "github.com/weaviate/weaviate/entities/models"
28
+ "github.com/weaviate/weaviate/entities/modulecapabilities"
29
+ "github.com/weaviate/weaviate/entities/schema"
30
+ "github.com/weaviate/weaviate/entities/search"
31
+ "github.com/weaviate/weaviate/usecases/objects"
32
+ "github.com/weaviate/weaviate/usecases/sharding"
33
+ )
34
+
35
// fakeSchemaGetter serves a fixed schema to the classifier under test.
type fakeSchemaGetter struct {
	schema schema.Schema
}
38
+
39
// GetSchemaSkipAuth returns the fixed test schema without any auth checks.
func (f *fakeSchemaGetter) GetSchemaSkipAuth() schema.Schema {
	return f.schema
}
42
+
43
// ReadOnlyClass looks the class up in the fixed test schema.
func (f *fakeSchemaGetter) ReadOnlyClass(class string) *models.Class {
	return f.schema.GetClass(class)
}
46
+
47
// ResolveAlias always reports "no alias" in tests.
func (f *fakeSchemaGetter) ResolveAlias(string) string {
	return ""
}
50
+
51
// GetAliasesForClass always reports no aliases in tests.
func (f *fakeSchemaGetter) GetAliasesForClass(string) []*models.Alias {
	return nil
}
54
+
55
// CopyShardingState is not exercised by these tests.
func (f *fakeSchemaGetter) CopyShardingState(class string) *sharding.State {
	panic("not implemented")
}
58
+
59
// ShardOwner pretends each shard owns itself.
func (f *fakeSchemaGetter) ShardOwner(class, shard string) (string, error) {
	return shard, nil
}
62
+
63
// ShardReplicas pretends each shard is its own single replica.
func (f *fakeSchemaGetter) ShardReplicas(class, shard string) ([]string, error) {
	return []string{shard}, nil
}
66
+
67
// TenantsShards marks every requested tenant as HOT (active).
func (f *fakeSchemaGetter) TenantsShards(_ context.Context, class string, tenants ...string) (map[string]string, error) {
	res := map[string]string{}
	for _, t := range tenants {
		res[t] = models.TenantActivityStatusHOT
	}
	return res, nil
}
74
+
75
// OptimisticTenantStatus marks the single requested tenant as HOT.
func (f *fakeSchemaGetter) OptimisticTenantStatus(_ context.Context, class string, tenant string) (map[string]string, error) {
	res := map[string]string{}
	res[tenant] = models.TenantActivityStatusHOT
	return res, nil
}
80
+
81
// ShardFromUUID maps a uuid to a shard by using the raw bytes as the name.
func (f *fakeSchemaGetter) ShardFromUUID(class string, uuid []byte) string { return string(uuid) }
82
+
83
// Nodes is not exercised by these tests.
func (f *fakeSchemaGetter) Nodes() []string {
	panic("not implemented")
}
86
+
87
// NodeName is not exercised by these tests.
func (f *fakeSchemaGetter) NodeName() string {
	panic("not implemented")
}
90
+
91
// ClusterHealthScore is not exercised by these tests.
func (f *fakeSchemaGetter) ClusterHealthScore() int {
	panic("not implemented")
}
94
+
95
// ResolveParentNodes is not exercised by these tests.
func (f *fakeSchemaGetter) ResolveParentNodes(string, string,
) (map[string]string, error) {
	panic("not implemented")
}
99
+
100
// Statistics is not exercised by these tests.
func (f *fakeSchemaGetter) Statistics() map[string]any {
	panic("not implemented")
}
103
+
104
// fakeClassificationRepo is an in-memory, mutex-guarded store of
// classification records keyed by id.
type fakeClassificationRepo struct {
	sync.Mutex
	db map[strfmt.UUID]models.Classification
}
108
+
109
// newFakeClassificationRepo creates an empty in-memory classification store.
func newFakeClassificationRepo() *fakeClassificationRepo {
	return &fakeClassificationRepo{
		db: map[strfmt.UUID]models.Classification{},
	}
}
114
+
115
// Put stores (or overwrites) a classification record under its id.
func (f *fakeClassificationRepo) Put(ctx context.Context, class models.Classification) error {
	f.Lock()
	defer f.Unlock()

	f.db[class.ID] = class
	return nil
}
122
+
123
// Get returns a copy of the classification with the given id, or (nil, nil)
// when no such record exists — absence is not an error for this fake.
func (f *fakeClassificationRepo) Get(ctx context.Context, id strfmt.UUID) (*models.Classification, error) {
	f.Lock()
	defer f.Unlock()

	class, ok := f.db[id]
	if !ok {
		return nil, nil
	}

	return &class, nil
}
134
+
135
// newFakeVectorRepoKNN builds a KNN fake whose reads are served from the
// given unclassified/classified fixtures and whose writes land in db.
func newFakeVectorRepoKNN(unclassified, classified search.Results) *fakeVectorRepoKNN {
	return &fakeVectorRepoKNN{
		unclassified: unclassified,
		classified:   classified,
		db:           map[strfmt.UUID]*models.Object{},
	}
}
142
+
143
// read requests are specified through unclassified and classified,
// write requests (Put[Kind]) are stored in the db map
type fakeVectorRepoKNN struct {
	sync.Mutex
	unclassified []search.Result // fixtures returned by GetUnclassified
	classified   []search.Result // fixtures aggregated by AggregateNeighbors
	db           map[strfmt.UUID]*models.Object
	// errorOnAggregate, when set, is returned by AggregateNeighbors to
	// exercise the classifier's error paths
	errorOnAggregate error
	// batchStorageDelay artificially slows down BatchPutObjects
	batchStorageDelay time.Duration
}
153
+
154
// GetUnclassified returns the fixed unclassified fixtures, ignoring the
// class/property/filter arguments.
func (f *fakeVectorRepoKNN) GetUnclassified(ctx context.Context,
	class string, properties []string, propsToReturn []string,
	filter *libfilters.LocalFilter,
) ([]search.Result, error) {
	f.Lock()
	defer f.Unlock()
	return f.unclassified, nil
}
162
+
163
+ func (f *fakeVectorRepoKNN) AggregateNeighbors(ctx context.Context, vector []float32,
164
+ class string, properties []string, k int,
165
+ filter *libfilters.LocalFilter,
166
+ ) ([]NeighborRef, error) {
167
+ f.Lock()
168
+ defer f.Unlock()
169
+
170
+ // simulate that this takes some time
171
+ time.Sleep(1 * time.Millisecond)
172
+
173
+ if k != 1 {
174
+ return nil, fmt.Errorf("fake vector repo only supports k=1")
175
+ }
176
+
177
+ results := f.classified
178
+ sort.SliceStable(results, func(i, j int) bool {
179
+ simI, err := cosineSim(results[i].Vector, vector)
180
+ if err != nil {
181
+ panic(err.Error())
182
+ }
183
+
184
+ simJ, err := cosineSim(results[j].Vector, vector)
185
+ if err != nil {
186
+ panic(err.Error())
187
+ }
188
+ return simI > simJ
189
+ })
190
+
191
+ var out []NeighborRef
192
+ schema := results[0].Schema.(map[string]interface{})
193
+ for _, propName := range properties {
194
+ prop, ok := schema[propName]
195
+ if !ok {
196
+ return nil, fmt.Errorf("missing prop %s", propName)
197
+ }
198
+
199
+ refs := prop.(models.MultipleRef)
200
+ if len(refs) != 1 {
201
+ return nil, fmt.Errorf("wrong length %d", len(refs))
202
+ }
203
+
204
+ out = append(out, NeighborRef{
205
+ Beacon: refs[0].Beacon,
206
+ WinningCount: 1,
207
+ OverallCount: 1,
208
+ LosingCount: 1,
209
+ Property: propName,
210
+ })
211
+ }
212
+
213
+ return out, f.errorOnAggregate
214
+ }
215
+
216
// ZeroShotSearch returns an empty result set — the KNN fake does not model
// zero-shot classification.
func (f *fakeVectorRepoKNN) ZeroShotSearch(ctx context.Context, vector []float32,
	class string, properties []string,
	filter *libfilters.LocalFilter,
) ([]search.Result, error) {
	return []search.Result{}, nil
}
222
+
223
// VectorSearch is unsupported in the KNN fake and always errors.
func (f *fakeVectorRepoKNN) VectorSearch(ctx context.Context,
	params dto.GetParams, targetVectors []string, searchVectors []models.Vector,
) ([]search.Result, error) {
	f.Lock()
	defer f.Unlock()
	return nil, fmt.Errorf("vector class search not implemented in fake")
}
230
+
231
// BatchPutObjects records each batch object in the db map keyed by id,
// optionally sleeping batchStorageDelay first to simulate slow storage.
func (f *fakeVectorRepoKNN) BatchPutObjects(ctx context.Context, objects objects.BatchObjects, repl *additional.ReplicationProperties, schemaVersion uint64) (objects.BatchObjects, error) {
	f.Lock()
	defer f.Unlock()

	if f.batchStorageDelay > 0 {
		time.Sleep(f.batchStorageDelay)
	}

	for _, batchObject := range objects {
		f.db[batchObject.Object.ID] = batchObject.Object
	}
	return objects, nil
}
244
+
245
// get returns the stored object for id and whether it was present.
func (f *fakeVectorRepoKNN) get(id strfmt.UUID) (*models.Object, bool) {
	f.Lock()
	defer f.Unlock()
	t, ok := f.db[id]
	return t, ok
}
251
+
252
// newFakeVectorRepoContextual builds a contextual fake serving the given
// unclassified sources and classification targets; writes land in db.
func newFakeVectorRepoContextual(unclassified, targets search.Results) *fakeVectorRepoContextual {
	return &fakeVectorRepoContextual{
		unclassified: unclassified,
		targets:      targets,
		db:           map[strfmt.UUID]*models.Object{},
	}
}
259
+
260
// read requests are specified through unclassified and classified,
// write requests (Put[Kind]) are stored in the db map
type fakeVectorRepoContextual struct {
	sync.Mutex
	unclassified []search.Result // fixtures returned by GetUnclassified
	targets      []search.Result // candidate targets served by VectorSearch
	db           map[strfmt.UUID]*models.Object
	// errorOnAggregate, when set, is returned by VectorSearch
	errorOnAggregate error
}
269
+
270
// get returns the stored object for id and whether it was present.
func (f *fakeVectorRepoContextual) get(id strfmt.UUID) (*models.Object, bool) {
	f.Lock()
	defer f.Unlock()
	t, ok := f.db[id]
	return t, ok
}
276
+
277
// GetUnclassified returns the fixed unclassified fixtures, ignoring the
// class/property/filter arguments.
// NOTE(review): unlike the KNN fake this does not take the mutex — harmless
// as long as unclassified is never mutated; confirm if that changes.
func (f *fakeVectorRepoContextual) GetUnclassified(ctx context.Context,
	class string, properties []string, propsToReturn []string,
	filter *libfilters.LocalFilter,
) ([]search.Result, error) {
	return f.unclassified, nil
}
283
+
284
// AggregateNeighbors is not used by the contextual classification path.
func (f *fakeVectorRepoContextual) AggregateNeighbors(ctx context.Context, vector []float32,
	class string, properties []string, k int,
	filter *libfilters.LocalFilter,
) ([]NeighborRef, error) {
	panic("not implemented")
}
290
+
291
// ZeroShotSearch is not used by the contextual classification path.
func (f *fakeVectorRepoContextual) ZeroShotSearch(ctx context.Context, vector []float32,
	class string, properties []string,
	filter *libfilters.LocalFilter,
) ([]search.Result, error) {
	panic("not implemented")
}
297
+
298
// BatchPutObjects records each batch object in the db map keyed by id.
func (f *fakeVectorRepoContextual) BatchPutObjects(ctx context.Context, objects objects.BatchObjects, repl *additional.ReplicationProperties, schemaVersion uint64) (objects.BatchObjects, error) {
	f.Lock()
	defer f.Unlock()
	for _, batchObject := range objects {
		f.db[batchObject.Object.ID] = batchObject.Object
	}
	return objects, nil
}
306
+
307
// VectorSearch serves the target fixtures filtered by class name. With a nil
// search vector it returns all matching targets; with a []float32 vector it
// returns only the single target most cosine-similar to it. Any other vector
// type is rejected.
// NOTE(review): assumes len(searchVectors) >= 1 — an empty slice would panic;
// confirm callers always supply one entry.
func (f *fakeVectorRepoContextual) VectorSearch(ctx context.Context,
	params dto.GetParams, targetVectors []string, searchVectors []models.Vector,
) ([]search.Result, error) {
	if searchVectors[0] == nil {
		filteredTargets := matchClassName(f.targets, params.ClassName)
		return filteredTargets, nil
	}

	switch searchVector := searchVectors[0].(type) {
	case []float32:
		// simulate that this takes some time
		time.Sleep(5 * time.Millisecond)

		filteredTargets := matchClassName(f.targets, params.ClassName)
		results := filteredTargets
		// most-similar first
		sort.SliceStable(results, func(i, j int) bool {
			simI, err := cosineSim(results[i].Vector, searchVector)
			if err != nil {
				panic(err.Error())
			}

			simJ, err := cosineSim(results[j].Vector, searchVector)
			if err != nil {
				panic(err.Error())
			}
			return simI > simJ
		})

		if len(results) == 0 {
			return nil, f.errorOnAggregate
		}

		// return only the best match, plus any injected error
		out := []search.Result{
			results[0],
		}

		return out, f.errorOnAggregate
	default:
		return nil, fmt.Errorf("unsupported search vector type: %T", searchVectors[0])
	}
}
348
+
349
// cosineSim returns the cosine similarity of two equal-length vectors, or an
// error when their dimensions differ. The accumulation is done in float64
// after a float32 multiply, matching the plain mathematical definition.
func cosineSim(a, b []float32) (float32, error) {
	if len(a) != len(b) {
		return 0, fmt.Errorf("vectors have different dimensions")
	}

	var dot, normA, normB float64
	for i := range a {
		dot += float64(a[i] * b[i])
		normA += float64(a[i] * a[i])
		normB += float64(b[i] * b[i])
	}

	return float32(dot / (math.Sqrt(normA) * math.Sqrt(normB))), nil
}
368
+
369
+ func matchClassName(in []search.Result, className string) []search.Result {
370
+ var out []search.Result
371
+ for _, item := range in {
372
+ if item.ClassName == className {
373
+ out = append(out, item)
374
+ }
375
+ }
376
+
377
+ return out
378
+ }
379
+
380
// fakeModuleClassifyFn fakes a module classifier by mapping known source
// object ids directly to target ids, one map per classified property.
type fakeModuleClassifyFn struct {
	fakeExactCategoryMappings map[string]string // source id -> exactCategory target id
	fakeMainCategoryMappings  map[string]string // source id -> mainCategory target id
}
384
+
385
// NewFakeModuleClassifyFn pre-populates the id mappings asserted by the
// "with valid data" subtests (food/politics/society articles).
func NewFakeModuleClassifyFn() *fakeModuleClassifyFn {
	return &fakeModuleClassifyFn{
		fakeExactCategoryMappings: map[string]string{
			"75ba35af-6a08-40ae-b442-3bec69b355f9": "1b204f16-7da6-44fd-bbd2-8cc4a7414bc3",
			"a2bbcbdc-76e1-477d-9e72-a6d2cfb50109": "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2",
			"069410c3-4b9e-4f68-8034-32a066cb7997": "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2",
			"06a1e824-889c-4649-97f9-1ed3fa401d8e": "027b708a-31ca-43ea-9001-88bec864c79c",
		},
		fakeMainCategoryMappings: map[string]string{
			"6402e649-b1e0-40ea-b192-a64eab0d5e56": "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a",
			"f850439a-d3cd-4f17-8fbf-5a64405645cd": "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e",
			"069410c3-4b9e-4f68-8034-32a066cb7997": "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e",
		},
	}
}
400
+
401
// classifyFn implements modulecapabilities.ClassifyItemFn: it attaches the
// mapped exactCategory/mainCategory references to the item, records the
// classification metadata, and stores the item via the writer.
func (c *fakeModuleClassifyFn) classifyFn(item search.Result, itemIndex int,
	params models.Classification, filters modulecapabilities.Filters, writer modulecapabilities.Writer,
) error {
	// names of the properties this item actually got classified on
	var classified []string

	classifiedProp := c.fakeClassification(&item, "exactCategory", c.fakeExactCategoryMappings)
	if len(classifiedProp) > 0 {
		classified = append(classified, classifiedProp)
	}

	classifiedProp = c.fakeClassification(&item, "mainCategory", c.fakeMainCategoryMappings)
	if len(classifiedProp) > 0 {
		classified = append(classified, classifiedProp)
	}

	c.extendItemWithObjectMeta(&item, params, classified)

	err := writer.Store(item)
	if err != nil {
		return fmt.Errorf("store %s/%s: %w", item.ClassName, item.ID, err)
	}
	return nil
}
424
+
425
// fakeClassification sets item.Schema[propName] to a single-ref beacon for
// the mapped target and returns propName, or returns "" when the item's id
// has no mapping.
func (c *fakeModuleClassifyFn) fakeClassification(item *search.Result, propName string,
	fakes map[string]string,
) string {
	if target, ok := fakes[item.ID.String()]; ok {
		beacon := "weaviate://localhost/" + target
		item.Schema.(map[string]interface{})[propName] = models.MultipleRef{
			&models.SingleRef{
				Beacon:         strfmt.URI(beacon),
				Classification: nil,
			},
		}
		return propName
	}
	return ""
}
440
+
441
// extendItemWithObjectMeta attaches the "classification" additional property
// (run id, scope, classified fields, completion timestamp) to the item.
func (c *fakeModuleClassifyFn) extendItemWithObjectMeta(item *search.Result,
	params models.Classification, classified []string,
) {
	if item.AdditionalProperties == nil {
		item.AdditionalProperties = models.AdditionalProperties{}
	}

	item.AdditionalProperties["classification"] = additional.Classification{
		ID:               params.ID,
		Scope:            params.ClassifyProperties,
		ClassifiedFields: classified,
		Completed:        strfmt.DateTime(time.Now()),
	}
}
455
+
456
// fakeModulesProvider serves the fake module classifier to the Classifier.
type fakeModulesProvider struct {
	fakeModuleClassifyFn *fakeModuleClassifyFn
}
459
+
460
// NewFakeModulesProvider wires up a provider backed by NewFakeModuleClassifyFn.
func NewFakeModulesProvider() *fakeModulesProvider {
	return &fakeModulesProvider{NewFakeModuleClassifyFn()}
}
463
+
464
// ParseClassifierSettings accepts any settings without validation.
func (m *fakeModulesProvider) ParseClassifierSettings(name string,
	params *models.Classification,
) error {
	return nil
}
469
+
470
// GetClassificationFn returns the fake classify function for the one
// recognized classifier name and a "not found" error for anything else —
// the unrecognized-name subtest asserts on this error text.
func (m *fakeModulesProvider) GetClassificationFn(className, name string,
	params modulecapabilities.ClassifyParams,
) (modulecapabilities.ClassifyItemFn, error) {
	if name == "text2vec-contextionary-custom-contextual" {
		return m.fakeModuleClassifyFn.classifyFn, nil
	}
	return nil, errors.Errorf("classifier %s not found", name)
}
platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta.go ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import "github.com/weaviate/weaviate/entities/models"
15
+
16
+ // NeighborRefDistances include various distances about the winning and losing
17
+ // groups (knn)
18
// NeighborRefDistances include various distances about the winning and losing
// groups (knn)
type NeighborRefDistances struct {
	// ClosestOverallDistance is the smallest distance across both groups.
	ClosestOverallDistance float32

	// Winning
	ClosestWinningDistance float32
	MeanWinningDistance    float32

	// Losing (optional) — nil when there was no losing group.
	MeanLosingDistance    *float32
	ClosestLosingDistance *float32
}
29
+
30
+ func (r NeighborRef) Meta() *models.ReferenceMetaClassification {
31
+ out := &models.ReferenceMetaClassification{
32
+ OverallCount: int64(r.OverallCount),
33
+ WinningCount: int64(r.WinningCount),
34
+ LosingCount: int64(r.LosingCount),
35
+ ClosestOverallDistance: float64(r.Distances.ClosestOverallDistance),
36
+ WinningDistance: float64(r.Distances.MeanWinningDistance), // deprecated, remove in 0.23.0
37
+ MeanWinningDistance: float64(r.Distances.MeanWinningDistance),
38
+ ClosestWinningDistance: float64(r.Distances.ClosestWinningDistance),
39
+ }
40
+
41
+ if r.Distances.MeanLosingDistance != nil {
42
+ out.MeanLosingDistance = ptFloat64(float64(*r.Distances.MeanLosingDistance))
43
+ out.LosingDistance = ptFloat64(float64(*r.Distances.MeanLosingDistance)) // deprecated
44
+ }
45
+
46
+ if r.Distances.ClosestLosingDistance != nil {
47
+ out.ClosestLosingDistance = ptFloat64(float64(*r.Distances.ClosestLosingDistance))
48
+ }
49
+
50
+ return out
51
+ }
52
+
53
// ptFloat64 returns a pointer to a copy of in.
func ptFloat64(in float64) *float64 {
	out := in
	return &out
}
platform/dbops/binaries/weaviate-src/usecases/classification/ref_meta_test.go ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "testing"
16
+
17
+ "github.com/stretchr/testify/assert"
18
+ "github.com/weaviate/weaviate/entities/models"
19
+ )
20
+
21
+ func Test_RefMeta(t *testing.T) {
22
+ t.Run("without a losing group", func(t *testing.T) {
23
+ source := NeighborRef{
24
+ WinningCount: 3,
25
+ OverallCount: 3,
26
+ LosingCount: 0,
27
+ Distances: NeighborRefDistances{
28
+ ClosestWinningDistance: 0.1,
29
+ ClosestOverallDistance: 0.1,
30
+ MeanWinningDistance: 0.2,
31
+ },
32
+ }
33
+
34
+ expected := &models.ReferenceMetaClassification{
35
+ ClosestWinningDistance: 0.1,
36
+ ClosestOverallDistance: 0.1,
37
+ MeanWinningDistance: 0.2,
38
+ WinningDistance: 0.2, // deprecated, must be removed in 0.23.0
39
+ OverallCount: 3,
40
+ WinningCount: 3,
41
+ LosingCount: 0,
42
+ }
43
+
44
+ actual := source.Meta()
45
+ assert.InDelta(t, expected.ClosestWinningDistance, actual.ClosestWinningDistance, 0.001)
46
+ assert.InDelta(t, expected.ClosestOverallDistance, actual.ClosestOverallDistance, 0.001)
47
+ assert.InDelta(t, expected.MeanWinningDistance, actual.MeanWinningDistance, 0.001)
48
+ assert.InDelta(t, expected.WinningDistance, actual.WinningDistance, 0.001)
49
+ assert.Equal(t, expected.OverallCount, actual.OverallCount)
50
+ assert.Equal(t, expected.WinningCount, actual.WinningCount)
51
+ assert.Equal(t, expected.LosingCount, actual.LosingCount)
52
+ })
53
+
54
+ t.Run("with a losing group", func(t *testing.T) {
55
+ source := NeighborRef{
56
+ WinningCount: 3,
57
+ OverallCount: 5,
58
+ LosingCount: 2,
59
+ Distances: NeighborRefDistances{
60
+ ClosestWinningDistance: 0.1,
61
+ ClosestOverallDistance: 0.1,
62
+ MeanWinningDistance: 0.2,
63
+ ClosestLosingDistance: ptFloat32(0.15),
64
+ MeanLosingDistance: ptFloat32(0.25),
65
+ },
66
+ }
67
+
68
+ expected := &models.ReferenceMetaClassification{
69
+ ClosestOverallDistance: 0.1,
70
+ ClosestWinningDistance: 0.1,
71
+ MeanWinningDistance: 0.2,
72
+ WinningDistance: 0.2, // deprecated, must be removed in 0.23.0
73
+ ClosestLosingDistance: ptFloat64(0.15),
74
+ MeanLosingDistance: ptFloat64(0.25),
75
+ LosingDistance: ptFloat64(0.25), // deprecated, must be removed in 0.23.0
76
+ OverallCount: 5,
77
+ WinningCount: 3,
78
+ LosingCount: 2,
79
+ }
80
+
81
+ actual := source.Meta()
82
+ assert.InDelta(t, expected.ClosestOverallDistance, actual.ClosestOverallDistance, 0.001)
83
+ assert.InDelta(t, expected.ClosestWinningDistance, actual.ClosestWinningDistance, 0.001)
84
+ assert.InDelta(t, expected.MeanWinningDistance, actual.MeanWinningDistance, 0.001)
85
+ assert.InDelta(t, expected.WinningDistance, actual.WinningDistance, 0.001)
86
+ assert.InDelta(t, *expected.ClosestLosingDistance, *actual.ClosestLosingDistance, 0.001)
87
+ assert.InDelta(t, *expected.MeanLosingDistance, *actual.MeanLosingDistance, 0.001)
88
+ assert.InDelta(t, *expected.LosingDistance, *actual.LosingDistance, 0.001)
89
+ assert.Equal(t, expected.OverallCount, actual.OverallCount)
90
+ assert.Equal(t, expected.OverallCount, actual.OverallCount)
91
+ assert.Equal(t, expected.WinningCount, actual.WinningCount)
92
+ assert.Equal(t, expected.LosingCount, actual.LosingCount)
93
+ })
94
+ }
95
+
96
// ptFloat32 returns a pointer to a copy of in.
func ptFloat32(in float32) *float32 {
	out := in
	return &out
}
platform/dbops/binaries/weaviate-src/usecases/classification/schema_for_test.go ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+
17
+ "github.com/go-openapi/strfmt"
18
+ "github.com/weaviate/weaviate/entities/models"
19
+ "github.com/weaviate/weaviate/entities/schema"
20
+ "github.com/weaviate/weaviate/entities/search"
21
+ )
22
+
23
// testSchema returns the fixed schema used by the classification tests: two
// bare target classes (ExactCategory, MainCategory) and an Article class with
// text, int, and reference properties pointing at the targets.
func testSchema() schema.Schema {
	return schema.Schema{
		Objects: &models.Schema{
			Classes: []*models.Class{
				{
					Class: "ExactCategory",
				},
				{
					Class: "MainCategory",
				},
				{
					Class: "Article",
					Properties: []*models.Property{
						{
							Name:     "description",
							DataType: []string{string(schema.DataTypeText)},
						},
						{
							Name:         "name",
							DataType:     schema.DataTypeText.PropString(),
							Tokenization: models.PropertyTokenizationWhitespace,
						},
						{
							// single-target ref — valid classify target
							Name:     "exactCategory",
							DataType: []string{"ExactCategory"},
						},
						{
							Name:     "mainCategory",
							DataType: []string{"MainCategory"},
						},
						{
							Name:     "categories",
							DataType: []string{"ExactCategory"},
						},
						{
							// multi-target ref — rejected by contextual validation
							Name:     "anyCategory",
							DataType: []string{"MainCategory", "ExactCategory"},
						},
						{
							// non-text property — rejected as basedOnProperty
							Name:     "words",
							DataType: schema.DataTypeInt.PropString(),
						},
					},
				},
			},
		},
	}
}
71
+
72
// vector position close to [1,0,0] means -> politics, [0,1,0] means -> society, [0, 0, 1] -> food&drink
//
// testDataToBeClassified returns six unclassified Article results, two per
// category, whose vectors cluster around the three axis directions above.
func testDataToBeClassified() search.Results {
	return search.Results{
		search.Result{
			ID:        "75ba35af-6a08-40ae-b442-3bec69b355f9",
			ClassName: "Article",
			Vector:    []float32{0.78, 0, 0},
			Schema: map[string]interface{}{
				"description": "Barack Obama is a former US president",
			},
		},
		search.Result{
			ID:        "f850439a-d3cd-4f17-8fbf-5a64405645cd",
			ClassName: "Article",
			Vector:    []float32{0.90, 0, 0},
			Schema: map[string]interface{}{
				"description": "Michelle Obama is Barack Obamas wife",
			},
		},
		search.Result{
			ID:        "a2bbcbdc-76e1-477d-9e72-a6d2cfb50109",
			ClassName: "Article",
			Vector:    []float32{0, 0.78, 0},
			Schema: map[string]interface{}{
				"description": "Johnny Depp is an actor",
			},
		},
		search.Result{
			ID:        "069410c3-4b9e-4f68-8034-32a066cb7997",
			ClassName: "Article",
			Vector:    []float32{0, 0.90, 0},
			Schema: map[string]interface{}{
				"description": "Brad Pitt starred in a Quentin Tarantino movie",
			},
		},
		search.Result{
			ID:        "06a1e824-889c-4649-97f9-1ed3fa401d8e",
			ClassName: "Article",
			Vector:    []float32{0, 0, 0.78},
			Schema: map[string]interface{}{
				"description": "Ice Cream often contains a lot of sugar",
			},
		},
		search.Result{
			ID:        "6402e649-b1e0-40ea-b192-a64eab0d5e56",
			ClassName: "Article",
			Vector:    []float32{0, 0, 0.90},
			Schema: map[string]interface{}{
				"description": "French Fries are more common in Belgium and the US than in France",
			},
		},
	}
}
125
+
126
// Fixed UUIDs of the target objects (MainCategory / ExactCategory) referenced
// throughout the classification test fixtures.
const (
	idMainCategoryPoliticsAndSociety = "39c6abe3-4bbe-4c4e-9e60-ca5e99ec6b4e"
	idMainCategoryFoodAndDrink       = "5a3d909a-4f0d-4168-8f5c-cd3074d1e79a"
	idCategoryPolitics               = "1b204f16-7da6-44fd-bbd2-8cc4a7414bc3"
	idCategorySociety                = "ec500f39-1dc9-4580-9bd1-55a8ea8e37a2"
	idCategoryFoodAndDrink           = "027b708a-31ca-43ea-9001-88bec864c79c"
)
133
+
134
// only used for contextual type classification
//
// testDataPossibleTargets returns the candidate target objects (two
// MainCategory, three ExactCategory) with vectors near the axis directions
// used by testDataToBeClassified.
func testDataPossibleTargets() search.Results {
	return search.Results{
		search.Result{
			ID:        idMainCategoryPoliticsAndSociety,
			ClassName: "MainCategory",
			Vector:    []float32{1.01, 1.01, 0},
			Schema: map[string]interface{}{
				"name": "Politics and Society",
			},
		},
		search.Result{
			ID:        idMainCategoryFoodAndDrink,
			ClassName: "MainCategory",
			Vector:    []float32{0, 0, 0.99},
			Schema: map[string]interface{}{
				"name": "Food and Drinks",
			},
		},
		search.Result{
			ID:        idCategoryPolitics,
			ClassName: "ExactCategory",
			Vector:    []float32{0.99, 0, 0},
			Schema: map[string]interface{}{
				"name": "Politics",
			},
		},
		search.Result{
			ID:        idCategorySociety,
			ClassName: "ExactCategory",
			Vector:    []float32{0, 0.90, 0},
			Schema: map[string]interface{}{
				"name": "Society",
			},
		},
		search.Result{
			ID:        idCategoryFoodAndDrink,
			ClassName: "ExactCategory",
			Vector:    []float32{0, 0, 0.99},
			Schema: map[string]interface{}{
				"name": "Food and Drink",
			},
		},
	}
}
179
+
180
+ func beaconRef(target string) *models.SingleRef {
181
+ beacon := fmt.Sprintf("weaviate://localhost/%s", target)
182
+ return &models.SingleRef{Beacon: strfmt.URI(beacon)}
183
+ }
184
+
185
// only used for knn-type
//
// testDataAlreadyClassified returns three Article results, one per category,
// that already carry exactCategory/mainCategory refs — the training set for
// the knn classifier.
func testDataAlreadyClassified() search.Results {
	return search.Results{
		search.Result{
			ID:        "8aeecd06-55a0-462c-9853-81b31a284d80",
			ClassName: "Article",
			Vector:    []float32{1, 0, 0},
			Schema: map[string]interface{}{
				"description":   "This article talks about politics",
				"exactCategory": models.MultipleRef{beaconRef(idCategoryPolitics)},
				"mainCategory":  models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)},
			},
		},
		search.Result{
			ID:        "9f4c1847-2567-4de7-8861-34cf47a071ae",
			ClassName: "Article",
			Vector:    []float32{0, 1, 0},
			Schema: map[string]interface{}{
				"description":   "This articles talks about society",
				"exactCategory": models.MultipleRef{beaconRef(idCategorySociety)},
				"mainCategory":  models.MultipleRef{beaconRef(idMainCategoryPoliticsAndSociety)},
			},
		},
		search.Result{
			ID:        "926416ec-8fb1-4e40-ab8c-37b226b3d68e",
			ClassName: "Article",
			Vector:    []float32{0, 0, 1},
			Schema: map[string]interface{}{
				"description":   "This article talks about food",
				"exactCategory": models.MultipleRef{beaconRef(idCategoryFoodAndDrink)},
				"mainCategory":  models.MultipleRef{beaconRef(idMainCategoryFoodAndDrink)},
			},
		},
	}
}
platform/dbops/binaries/weaviate-src/usecases/classification/transactions.go ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "encoding/json"
16
+
17
+ "github.com/pkg/errors"
18
+ "github.com/weaviate/weaviate/entities/models"
19
+ "github.com/weaviate/weaviate/usecases/cluster"
20
+ )
21
+
22
// TransactionPut is the cluster transaction type used to distribute a single
// classification to other nodes.
const TransactionPut cluster.TransactionType = "put_single"

// TransactionPutPayload wraps the classification carried by a TransactionPut.
type TransactionPutPayload struct {
	Classification models.Classification `json:"classification"`
}
27
+
28
+ func UnmarshalTransaction(txType cluster.TransactionType,
29
+ payload json.RawMessage,
30
+ ) (interface{}, error) {
31
+ switch txType {
32
+ case TransactionPut:
33
+ return unmarshalPut(payload)
34
+
35
+ default:
36
+ return nil, errors.Errorf("unrecognized schema transaction type %q", txType)
37
+
38
+ }
39
+ }
40
+
41
+ func unmarshalPut(payload json.RawMessage) (interface{}, error) {
42
+ var pl TransactionPutPayload
43
+ if err := json.Unmarshal(payload, &pl); err != nil {
44
+ return nil, err
45
+ }
46
+
47
+ return pl, nil
48
+ }
platform/dbops/binaries/weaviate-src/usecases/classification/validation.go ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+
17
+ "github.com/weaviate/weaviate/entities/errorcompounder"
18
+ "github.com/weaviate/weaviate/entities/models"
19
+ "github.com/weaviate/weaviate/entities/schema"
20
+ )
21
+
22
// Supported classification types. An unset (empty) type is treated as knn by
// the validator (see typeKNN).
const (
	TypeKNN        = "knn"
	TypeContextual = "text2vec-contextionary-contextual"
	TypeZeroShot   = "zeroshot"
)
27
+
28
// Validator checks a user-provided classification request against the schema
// and collects every violation in an error compounder.
type Validator struct {
	// authorizedGetClass resolves a class name to its schema definition
	// (already authorization-checked by the caller).
	authorizedGetClass func(string) (*models.Class, error)
	errors             *errorcompounder.SafeErrorCompounder
	subject            models.Classification
}
33
+
34
// NewValidator constructs a Validator for the given classification subject.
func NewValidator(authorizedGetClass func(string) (*models.Class, error), subject models.Classification) *Validator {
	return &Validator{
		authorizedGetClass: authorizedGetClass,
		errors:             &errorcompounder.SafeErrorCompounder{},
		subject:            subject,
	}
}
41
+
42
+ func (v *Validator) Do() error {
43
+ v.validate()
44
+
45
+ err := v.errors.First()
46
+ if err != nil {
47
+ return fmt.Errorf("invalid classification: %w", err)
48
+ }
49
+
50
+ return nil
51
+ }
52
+
53
// validate collects all validation errors into v.errors. It aborts early when
// the class is unset or cannot be resolved, since all remaining checks
// depend on the class schema.
func (v *Validator) validate() {
	if v.subject.Class == "" {
		v.errors.Add(fmt.Errorf("class must be set"))
		return
	}

	class, err := v.authorizedGetClass(v.subject.Class)
	if err != nil {
		v.errors.Add(err)
		return
	}
	if class == nil {
		v.errors.Addf("class '%s' not found in schema", v.subject.Class)
		return
	}

	// type-specific filter feasibility, then property-level checks
	v.contextualTypeFeasibility()
	v.knnTypeFeasibility()
	v.basedOnProperties(class)
	v.classifyProperties(class)
}
74
+
75
+ func (v *Validator) contextualTypeFeasibility() {
76
+ if !v.typeText2vecContextionaryContextual() {
77
+ return
78
+ }
79
+
80
+ if v.subject.Filters != nil && v.subject.Filters.TrainingSetWhere != nil {
81
+ v.errors.Addf("type is 'text2vec-contextionary-contextual', but 'trainingSetWhere' filter is set, for 'text2vec-contextionary-contextual' there is no training data, instead limit possible target data directly through setting 'targetWhere'")
82
+ }
83
+ }
84
+
85
+ func (v *Validator) knnTypeFeasibility() {
86
+ if !v.typeKNN() {
87
+ return
88
+ }
89
+
90
+ if v.subject.Filters != nil && v.subject.Filters.TargetWhere != nil {
91
+ v.errors.Addf("type is 'knn', but 'targetWhere' filter is set, for 'knn' you cannot limit target data directly, instead limit training data through setting 'trainingSetWhere'")
92
+ }
93
+ }
94
+
95
+ func (v *Validator) basedOnProperties(class *models.Class) {
96
+ if len(v.subject.BasedOnProperties) == 0 {
97
+ v.errors.Addf("basedOnProperties must have at least one property")
98
+ return
99
+ }
100
+
101
+ if len(v.subject.BasedOnProperties) > 1 {
102
+ v.errors.Addf("only a single property in basedOnProperties supported at the moment, got %v",
103
+ v.subject.BasedOnProperties)
104
+ return
105
+ }
106
+
107
+ for _, prop := range v.subject.BasedOnProperties {
108
+ v.basedOnProperty(class, prop)
109
+ }
110
+ }
111
+
112
+ func (v *Validator) basedOnProperty(class *models.Class, propName string) {
113
+ prop, ok := v.propertyByName(class, propName)
114
+ if !ok {
115
+ v.errors.Addf("basedOnProperties: property '%s' does not exist", propName)
116
+ return
117
+ }
118
+
119
+ dt, err := schema.FindPropertyDataTypeWithRefsAndAuth(v.authorizedGetClass, prop.DataType, false, "")
120
+ if err != nil {
121
+ v.errors.Addf("basedOnProperties: %v", err)
122
+ return
123
+ }
124
+
125
+ if !dt.IsPrimitive() {
126
+ v.errors.Addf("basedOnProperties: property '%s' must be of type 'text'", propName)
127
+ return
128
+ }
129
+
130
+ if dt.AsPrimitive() != schema.DataTypeText {
131
+ v.errors.Addf("basedOnProperties: property '%s' must be of type 'text'", propName)
132
+ return
133
+ }
134
+ }
135
+
136
+ func (v *Validator) classifyProperties(class *models.Class) {
137
+ if len(v.subject.ClassifyProperties) == 0 {
138
+ v.errors.Addf("classifyProperties must have at least one property")
139
+ return
140
+ }
141
+
142
+ for _, prop := range v.subject.ClassifyProperties {
143
+ v.classifyProperty(class, prop)
144
+ }
145
+ }
146
+
147
+ func (v *Validator) classifyProperty(class *models.Class, propName string) {
148
+ prop, ok := v.propertyByName(class, propName)
149
+ if !ok {
150
+ v.errors.Addf("classifyProperties: property '%s' does not exist", propName)
151
+ return
152
+ }
153
+
154
+ dt, err := schema.FindPropertyDataTypeWithRefsAndAuth(v.authorizedGetClass, prop.DataType, false, "")
155
+ if err != nil {
156
+ v.errors.Addf("classifyProperties: %w", err)
157
+ return
158
+ }
159
+
160
+ if !dt.IsReference() {
161
+ v.errors.Addf("classifyProperties: property '%s' must be of reference type (cref)", propName)
162
+ return
163
+ }
164
+
165
+ if v.typeText2vecContextionaryContextual() {
166
+ if len(dt.Classes()) > 1 {
167
+ v.errors.Addf("classifyProperties: property '%s'"+
168
+ " has more than one target class, classification of type 'text2vec-contextionary-contextual' requires exactly one target class", propName)
169
+ return
170
+ }
171
+ }
172
+ }
173
+
174
+ func (v *Validator) propertyByName(class *models.Class, propName string) (*models.Property, bool) {
175
+ for _, prop := range class.Properties {
176
+ if prop.Name == propName {
177
+ return prop, true
178
+ }
179
+ }
180
+
181
+ return nil, false
182
+ }
183
+
184
+ func (v *Validator) typeText2vecContextionaryContextual() bool {
185
+ if v.subject.Type == "" {
186
+ return false
187
+ }
188
+
189
+ return v.subject.Type == TypeContextual
190
+ }
191
+
192
+ func (v *Validator) typeKNN() bool {
193
+ if v.subject.Type == "" {
194
+ return true
195
+ }
196
+
197
+ return v.subject.Type == TypeKNN
198
+ }
platform/dbops/binaries/weaviate-src/usecases/classification/validation_test.go ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+ "testing"
17
+
18
+ "github.com/stretchr/testify/assert"
19
+
20
+ "github.com/weaviate/weaviate/entities/models"
21
+ )
22
+
23
+ func Test_ValidateUserInput(t *testing.T) {
24
+ type testcase struct {
25
+ name string
26
+ input models.Classification
27
+ expectedError error
28
+ }
29
+
30
+ // knn or general
31
+ tests := []testcase{
32
+ {
33
+ name: "missing class",
34
+ input: models.Classification{
35
+ BasedOnProperties: []string{"description"},
36
+ ClassifyProperties: []string{"exactCategory"},
37
+ },
38
+ expectedError: fmt.Errorf("invalid classification: class must be set"),
39
+ },
40
+
41
+ {
42
+ name: "missing basedOnProperty (nil)",
43
+ input: models.Classification{
44
+ Class: "Article",
45
+ BasedOnProperties: nil,
46
+ ClassifyProperties: []string{"exactCategory"},
47
+ },
48
+ expectedError: fmt.Errorf("invalid classification: basedOnProperties must have at least one property"),
49
+ },
50
+ {
51
+ name: "missing basedOnProperty (len=0)",
52
+ input: models.Classification{
53
+ Class: "Article",
54
+ BasedOnProperties: []string{},
55
+ ClassifyProperties: []string{"exactCategory"},
56
+ },
57
+ expectedError: fmt.Errorf("invalid classification: basedOnProperties must have at least one property"),
58
+ },
59
+
60
+ {
61
+ name: "more than one basedOnProperty",
62
+ input: models.Classification{
63
+ Class: "Article",
64
+ BasedOnProperties: []string{"description", "name"},
65
+ ClassifyProperties: []string{"exactCategory"},
66
+ },
67
+ expectedError: fmt.Errorf("invalid classification: only a single property in basedOnProperties " +
68
+ "supported at the moment, got [description name]"),
69
+ },
70
+
71
+ {
72
+ name: "basedOnProperty does not exist",
73
+ input: models.Classification{
74
+ Class: "Article",
75
+ BasedOnProperties: []string{"doesNotExist"},
76
+ ClassifyProperties: []string{"exactCategory"},
77
+ },
78
+ expectedError: fmt.Errorf("invalid classification: basedOnProperties: property 'doesNotExist' does not exist"),
79
+ },
80
+
81
+ {
82
+ name: "basedOnProperty is not of type text",
83
+ input: models.Classification{
84
+ Class: "Article",
85
+ BasedOnProperties: []string{"words"},
86
+ ClassifyProperties: []string{"exactCategory"},
87
+ },
88
+ expectedError: fmt.Errorf("invalid classification: basedOnProperties: property 'words' must be of type 'text'"),
89
+ },
90
+
91
+ {
92
+ name: "missing classifyProperties (nil)",
93
+ input: models.Classification{
94
+ Class: "Article",
95
+ BasedOnProperties: []string{"description"},
96
+ ClassifyProperties: nil,
97
+ },
98
+ expectedError: fmt.Errorf("invalid classification: classifyProperties must have at least one property"),
99
+ },
100
+
101
+ {
102
+ name: "missing classifyProperties (len=0)",
103
+ input: models.Classification{
104
+ Class: "Article",
105
+ BasedOnProperties: []string{"description"},
106
+ ClassifyProperties: []string{},
107
+ },
108
+ expectedError: fmt.Errorf("invalid classification: classifyProperties must have at least one property"),
109
+ },
110
+
111
+ {
112
+ name: "classifyProperties does not exist",
113
+ input: models.Classification{
114
+ Class: "Article",
115
+ BasedOnProperties: []string{"description"},
116
+ ClassifyProperties: []string{"doesNotExist"},
117
+ },
118
+ expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'doesNotExist' does not exist"),
119
+ },
120
+
121
+ {
122
+ name: "classifyProperties is not of reference type",
123
+ input: models.Classification{
124
+ Class: "Article",
125
+ BasedOnProperties: []string{"description"},
126
+ ClassifyProperties: []string{"name"},
127
+ },
128
+ expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'name' must be of reference type (cref)"),
129
+ },
130
+
131
+ {
132
+ name: "multiple missing fields (aborts early as we can't validate properties if class is not set)",
133
+ input: models.Classification{},
134
+ expectedError: fmt.Errorf("invalid classification: class must be set"),
135
+ },
136
+
137
+ // specific for knn
138
+ {
139
+ name: "targetWhere is set",
140
+ input: models.Classification{
141
+ Class: "Article",
142
+ BasedOnProperties: []string{"description"},
143
+ ClassifyProperties: []string{"exactCategory"},
144
+ Filters: &models.ClassificationFilters{
145
+ TargetWhere: &models.WhereFilter{Operator: "Equal", Path: []string{"foo"}, ValueText: ptString("bar")},
146
+ },
147
+ Type: "knn",
148
+ },
149
+ expectedError: fmt.Errorf("invalid classification: type is 'knn', but 'targetWhere' filter is set, for 'knn' you cannot limit target data directly, instead limit training data through setting 'trainingSetWhere'"),
150
+ },
151
+
152
+ // specific for text2vec-contextionary-contextual
153
+ {
154
+ name: "classifyProperty has more than one target class",
155
+ input: models.Classification{
156
+ Class: "Article",
157
+ BasedOnProperties: []string{"description"},
158
+ ClassifyProperties: []string{"anyCategory"},
159
+ Type: "text2vec-contextionary-contextual",
160
+ },
161
+ expectedError: fmt.Errorf("invalid classification: classifyProperties: property 'anyCategory' has more than one target class, classification of type 'text2vec-contextionary-contextual' requires exactly one target class"),
162
+ },
163
+
164
+ {
165
+ name: "trainingSetWhere is set",
166
+ input: models.Classification{
167
+ Class: "Article",
168
+ BasedOnProperties: []string{"description"},
169
+ ClassifyProperties: []string{"exactCategory"},
170
+ Filters: &models.ClassificationFilters{
171
+ TrainingSetWhere: &models.WhereFilter{Operator: "Equal", Path: []string{"foo"}, ValueText: ptString("bar")},
172
+ },
173
+ Type: "text2vec-contextionary-contextual",
174
+ },
175
+ expectedError: fmt.Errorf("invalid classification: type is 'text2vec-contextionary-contextual', but 'trainingSetWhere' filter is set, for 'text2vec-contextionary-contextual' there is no training data, instead limit possible target data directly through setting 'targetWhere'"),
176
+ },
177
+ }
178
+
179
+ for _, test := range tests {
180
+ t.Run(test.name, func(t *testing.T) {
181
+ fsg := &fakeSchemaGetter{testSchema()}
182
+ validator := NewValidator(func(name string) (*models.Class, error) { return fsg.ReadOnlyClass(name), nil }, test.input)
183
+ err := validator.Do()
184
+ assert.ErrorAs(t, err, &test.expectedError)
185
+ })
186
+ }
187
+ }
188
+
189
// ptString returns a pointer to a copy of in.
func ptString(in string) *string {
	out := in
	return &out
}
platform/dbops/binaries/weaviate-src/usecases/classification/writer.go ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "sync"
16
+ "time"
17
+
18
+ "github.com/sirupsen/logrus"
19
+ enterrors "github.com/weaviate/weaviate/entities/errors"
20
+
21
+ "github.com/weaviate/weaviate/entities/errorcompounder"
22
+ "github.com/weaviate/weaviate/entities/search"
23
+ "github.com/weaviate/weaviate/usecases/objects"
24
+ )
25
+
26
// batchWriterResults is the immutable summary returned by batchWriter.Stop.
type batchWriterResults struct {
	successCount int64
	errorCount   int64
	err          error
}

// SuccessCount returns the number of objects stored without error.
func (w batchWriterResults) SuccessCount() int64 {
	return w.successCount
}

// ErrorCount returns the number of objects that failed to store.
func (w batchWriterResults) ErrorCount() int64 {
	return w.errorCount
}

// Err returns the compounded error of the run, or nil if none occurred.
func (w batchWriterResults) Err() error {
	return w.err
}
43
+
44
// batchWriter accumulates classified objects into batches and persists them
// asynchronously through the vector repo. Store is guarded by mutex; batches
// reaching batchThreshold are handed off to a background saver goroutine.
type batchWriter struct {
	mutex           sync.RWMutex
	vectorRepo      vectorRepo
	batchItemsCount int // total items ever stored (not just the current batch)
	batchIndex      int // index within the current batch, reset on flush
	batchObjects    objects.BatchObjects
	saveObjectItems chan objects.BatchObjects // full batches handed to batchSave
	errorCount      int64
	ec              *errorcompounder.SafeErrorCompounder
	cancel          chan struct{} // signals batchSave to exit
	batchThreshold  int
	logger          logrus.FieldLogger
}
57
+
58
// newBatchWriter constructs a batchWriter with a fixed batch threshold of
// 100 objects. Call Start before storing and Stop to flush and collect
// results.
func newBatchWriter(vectorRepo vectorRepo, logger logrus.FieldLogger) Writer {
	return &batchWriter{
		vectorRepo:      vectorRepo,
		batchItemsCount: 0,
		batchObjects:    objects.BatchObjects{},
		saveObjectItems: make(chan objects.BatchObjects),
		errorCount:      0,
		ec:              &errorcompounder.SafeErrorCompounder{},
		cancel:          make(chan struct{}),
		batchThreshold:  100,
		logger:          logger,
	}
}
71
+
72
// Store puts an item to batch list. It is safe for concurrent use; once the
// batch threshold is reached the pending batch is handed to the background
// saver (see storeObject).
func (r *batchWriter) Store(item search.Result) error {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.storeObject(item)
}
78
+
79
// Start starts the batch save goroutine, which persists full batches in the
// background until Stop is called.
func (r *batchWriter) Start() {
	enterrors.GoWrapper(func() { r.batchSave() }, r.logger)
}
83
+
84
// Stop stops the batch save goroutine, synchronously saves the remaining
// items, and returns the aggregated results.
//
// NOTE(review): batchObjects, batchItemsCount and errorCount are read here
// without holding r.mutex — this assumes all Store callers have finished
// before Stop is invoked; confirm against the caller.
func (r *batchWriter) Stop() WriterResults {
	r.cancel <- struct{}{}
	r.saveObjects(r.batchObjects)
	return batchWriterResults{int64(r.batchItemsCount) - r.errorCount, r.errorCount, r.ec.ToError()}
}
90
+
91
// storeObject converts the search result into a batch object and appends it
// to the pending batch. When batchThreshold is reached the batch is sent to
// the saver goroutine; this send happens while the caller holds r.mutex and
// blocks until batchSave receives it, providing natural back-pressure.
// Callers must hold r.mutex.
//
// NOTE(review): if batchSave has already exited (after Stop consumed the
// cancel signal), a threshold-crossing send here would block forever —
// confirm callers never Store after Stop.
func (r *batchWriter) storeObject(item search.Result) error {
	batchObject := objects.BatchObject{
		UUID:          item.ID,
		Object:        item.Object(),
		OriginalIndex: r.batchIndex,
	}
	r.batchItemsCount++
	r.batchIndex++
	r.batchObjects = append(r.batchObjects, batchObject)
	if len(r.batchObjects) >= r.batchThreshold {
		r.saveObjectItems <- r.batchObjects
		r.batchObjects = objects.BatchObjects{}
		r.batchIndex = 0
	}
	return nil
}
107
+
108
// This goroutine is created in order to make possible the batch save operation to be run in background
// and not to block the Store(item) operation invocation which is being done by the worker threads.
// It exits when the cancel channel receives a signal (sent by Stop).
func (r *batchWriter) batchSave() {
	for {
		select {
		case <-r.cancel:
			return
		case items := <-r.saveObjectItems:
			r.saveObjects(items)
		}
	}
}
120
+
121
// saveObjects persists one batch via the vector repo, recording the
// batch-level error plus every per-object error in the compounder and
// incrementing errorCount per failed object. It is called from the batchSave
// goroutine and, for the final flush, directly from Stop.
func (r *batchWriter) saveObjects(items objects.BatchObjects) {
	// we need to allow quite some time as this is now a batch, no longer just a
	// single item and we don't have any control over what other load is
	// currently going on, such as imports. TODO: should this be
	// user-configurable?
	ctx, cancel := contextWithTimeout(30 * time.Second)
	defer cancel()

	if len(items) > 0 {
		saved, err := r.vectorRepo.BatchPutObjects(ctx, items, nil, 0)
		if err != nil {
			r.ec.Add(err)
		}
		for i := range saved {
			if saved[i].Err != nil {
				r.ec.Add(saved[i].Err)
				// NOTE(review): errorCount is a plain int64 updated here and
				// read in Stop without synchronization — confirm batchSave has
				// quiesced before Stop reads it
				r.errorCount++
			}
		}
	}
}
platform/dbops/binaries/weaviate-src/usecases/classification/writer_test.go ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package classification
13
+
14
+ import (
15
+ "fmt"
16
+ "testing"
17
+
18
+ "github.com/sirupsen/logrus/hooks/test"
19
+
20
+ "github.com/go-openapi/strfmt"
21
+ "github.com/stretchr/testify/assert"
22
+ "github.com/weaviate/weaviate/entities/search"
23
+ )
24
+
25
// shared no-op logger for all writer tests
var logger, _ = test.NewNullLogger()

// testParallelBatchWrite runs a full Start/Store/Stop cycle on its own
// goroutine and reports the final result on resultChannel.
func testParallelBatchWrite(batchWriter Writer, items search.Results, resultChannel chan<- WriterResults) {
	batchWriter.Start()
	for _, item := range items {
		batchWriter.Store(item)
	}
	res := batchWriter.Stop()
	resultChannel <- res
}
35
+
36
+ func generateSearchResultsToSave(size int) search.Results {
37
+ items := make(search.Results, 0)
38
+ for i := 0; i < size; i++ {
39
+ res := search.Result{
40
+ ID: strfmt.UUID(fmt.Sprintf("75ba35af-6a08-40ae-b442-3bec69b35%03d", i)),
41
+ ClassName: "Article",
42
+ Vector: []float32{0.78, 0, 0},
43
+ Schema: map[string]interface{}{
44
+ "description": "Barack Obama is a former US president",
45
+ },
46
+ }
47
+ items = append(items, res)
48
+ }
49
+ return items
50
+ }
51
+
52
// TestWriter_SimpleWrite stores a small, fixed set of results and expects
// every item to be counted as a success with no accumulated errors.
func TestWriter_SimpleWrite(t *testing.T) {
	// given
	searchResultsToBeSaved := testDataToBeClassified()
	vectorRepo := newFakeVectorRepoKNN(searchResultsToBeSaved, testDataAlreadyClassified())
	batchWriter := newBatchWriter(vectorRepo, logger)
	// when
	batchWriter.Start()
	for _, item := range searchResultsToBeSaved {
		batchWriter.Store(item)
	}
	res := batchWriter.Stop()
	// then
	assert.Equal(t, int64(len(searchResultsToBeSaved)), res.SuccessCount())
	assert.Equal(t, int64(0), res.ErrorCount())
	assert.Equal(t, nil, res.Err())
}
68
+
69
+ func TestWriter_LoadWrites(t *testing.T) {
70
+ // given
71
+ searchResultsCount := 640
72
+ searchResultsToBeSaved := generateSearchResultsToSave(searchResultsCount)
73
+ vectorRepo := newFakeVectorRepoKNN(searchResultsToBeSaved, testDataAlreadyClassified())
74
+ batchWriter := newBatchWriter(vectorRepo, logger)
75
+ // when
76
+ batchWriter.Start()
77
+ for _, item := range searchResultsToBeSaved {
78
+ batchWriter.Store(item)
79
+ }
80
+ res := batchWriter.Stop()
81
+ // then
82
+ assert.Equal(t, int64(searchResultsCount), res.SuccessCount())
83
+ assert.Equal(t, int64(0), res.ErrorCount())
84
+ assert.Equal(t, nil, res.Err())
85
+ }
86
+
87
// TestWriter_ParallelLoadWrites runs two independent batch writers
// concurrently, each against its own fake repo, and verifies both report
// full success — guarding against shared state between writer instances.
func TestWriter_ParallelLoadWrites(t *testing.T) {
	// given
	searchResultsToBeSavedCount1 := 600
	searchResultsToBeSavedCount2 := 440
	searchResultsToBeSaved1 := generateSearchResultsToSave(searchResultsToBeSavedCount1)
	searchResultsToBeSaved2 := generateSearchResultsToSave(searchResultsToBeSavedCount2)
	vectorRepo1 := newFakeVectorRepoKNN(searchResultsToBeSaved1, testDataAlreadyClassified())
	batchWriter1 := newBatchWriter(vectorRepo1, logger)
	resChannel1 := make(chan WriterResults)
	vectorRepo2 := newFakeVectorRepoKNN(searchResultsToBeSaved2, testDataAlreadyClassified())
	batchWriter2 := newBatchWriter(vectorRepo2, logger)
	resChannel2 := make(chan WriterResults)
	// when
	go testParallelBatchWrite(batchWriter1, searchResultsToBeSaved1, resChannel1)
	go testParallelBatchWrite(batchWriter2, searchResultsToBeSaved2, resChannel2)
	res1 := <-resChannel1
	res2 := <-resChannel2
	// then
	assert.Equal(t, int64(searchResultsToBeSavedCount1), res1.SuccessCount())
	assert.Equal(t, int64(0), res1.ErrorCount())
	assert.Equal(t, nil, res1.Err())
	assert.Equal(t, int64(searchResultsToBeSavedCount2), res2.SuccessCount())
	assert.Equal(t, int64(0), res2.ErrorCount())
	assert.Equal(t, nil, res2.Err())
}
platform/dbops/binaries/weaviate-src/usecases/cluster/delegate.go ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "bytes"
16
+ "encoding/binary"
17
+ "encoding/json"
18
+ "fmt"
19
+ "math/rand"
20
+ "sort"
21
+ "sync"
22
+ "time"
23
+
24
+ enterrors "github.com/weaviate/weaviate/entities/errors"
25
+
26
+ "github.com/hashicorp/memberlist"
27
+ "github.com/sirupsen/logrus"
28
+ )
29
+
30
// _OpCode represents the type of supported operation
type _OpCode uint8

const (
	// _ProtoVersion internal protocol version for exchanging messages
	_ProtoVersion uint8 = 1
	// _OpCodeDisk operation code for getting disk space
	_OpCodeDisk _OpCode = 1
	// _ProtoTTL used to decide when to update the cache
	_ProtoTTL = time.Second * 8
)

// spaceMsg is used to notify other nodes about current disk usage
type spaceMsg struct {
	header
	DiskUsage
	NodeLen uint8  // = len(Node) is required to marshal Node
	Node    string // node space
}

// header of an operation
type header struct {
	// OpCode operation code
	OpCode _OpCode
	// ProtoVersion protocol we will speak
	ProtoVersion uint8
}

// DiskUsage contains total and available space in B
type DiskUsage struct {
	// Total disk space
	Total uint64
	// Total available space
	Available uint64
}

// NodeInfo disk space
type NodeInfo struct {
	DiskUsage
	LastTimeMilli int64 // last update time in milliseconds
}

// marshal encodes the message as: header, DiskUsage (both big-endian),
// then the node name prefixed by its one-byte length.
func (d *spaceMsg) marshal() (data []byte, err error) {
	buf := bytes.NewBuffer(make([]byte, 0, 24+len(d.Node)))
	for _, part := range []interface{}{d.header, d.DiskUsage} {
		if err := binary.Write(buf, binary.BigEndian, part); err != nil {
			return nil, err
		}
	}
	// node name is encoded length-prefixed
	if err := buf.WriteByte(d.NodeLen); err != nil {
		return nil, err
	}
	_, err = buf.Write([]byte(d.Node))
	return buf.Bytes(), err
}

// unmarshal is the inverse of marshal, with a backward-compatibility path
// for peers that predate the length prefix.
func (d *spaceMsg) unmarshal(data []byte) (err error) {
	r := bytes.NewReader(data)
	if err = binary.Read(r, binary.BigEndian, &d.header); err != nil {
		return
	}
	if err = binary.Read(r, binary.BigEndian, &d.DiskUsage); err != nil {
		return
	}

	// decode the node name, starting with its length byte
	if d.NodeLen, err = r.ReadByte(); err != nil {
		return
	}
	start := len(data) - r.Len()
	stop := start + int(d.NodeLen)
	// backward compatibility: older peers did not encode the length, so the
	// byte just consumed is actually the first character of the name
	if _ProtoVersion <= 1 && stop != len(data) {
		start--
		stop = len(data)
		d.NodeLen = uint8(stop - start)
	}
	d.Node = string(data[start:stop])
	return nil
}
112
+
113
// delegate implements the memberList delegate interface
//
// Note: there are two independent locks. The embedded sync.Mutex guards
// Cache (see get/set/delete/sortCandidates); the named mutex guards
// hostInfo (see setOwnSpace/ownInfo).
type delegate struct {
	Name     string
	dataPath string
	log      logrus.FieldLogger
	sync.Mutex
	Cache map[string]NodeInfo

	mutex    sync.Mutex
	hostInfo NodeInfo

	metadata NodeMetadata
}

// NodeMetadata is serialized by NodeMeta and advertised to peers in the
// alive message; it carries the ports this node serves on.
type NodeMetadata struct {
	RestPort int `json:"rest_port"`
	GrpcPort int `json:"grpc_port"`
}
131
+
132
+ func (d *delegate) setOwnSpace(x DiskUsage) {
133
+ d.mutex.Lock()
134
+ d.hostInfo = NodeInfo{DiskUsage: x, LastTimeMilli: time.Now().UnixMilli()}
135
+ d.mutex.Unlock()
136
+ }
137
+
138
+ func (d *delegate) ownInfo() NodeInfo {
139
+ d.mutex.Lock()
140
+ defer d.mutex.Unlock()
141
+ return d.hostInfo
142
+ }
143
+
144
// init must be called first to initialize the cache
//
// It measures local disk usage once, seeds both the hostInfo snapshot and
// the cache entry for this node, and spawns the background updater. If the
// initial measurement fails, the cached timestamp is backdated by
// minUpdatePeriod, marking the seeded entry as already stale.
func (d *delegate) init(diskSpace func(path string) (DiskUsage, error)) error {
	d.Cache = make(map[string]NodeInfo, 32)
	if diskSpace == nil {
		return fmt.Errorf("function calculating disk space cannot be empty")
	}
	lastTime := time.Now()
	minUpdatePeriod := time.Second + _ProtoTTL/3
	space, err := diskSpace(d.dataPath)
	if err != nil {
		lastTime = lastTime.Add(-minUpdatePeriod)
		d.log.WithError(err).Error("calculate disk space")
	}

	d.setOwnSpace(space)
	d.set(d.Name, NodeInfo{space, lastTime.UnixMilli()}) // cache

	// delegate remains alive throughout the entire program.
	enterrors.GoWrapper(func() { d.updater(_ProtoTTL, minUpdatePeriod, diskSpace) }, d.log)
	return nil
}
165
+
166
+ // NodeMeta is used to retrieve meta-data about the current node
167
+ // when broadcasting an alive message. It's length is limited to
168
+ // the given byte size. This metadata is available in the Node structure.
169
+ func (d *delegate) NodeMeta(limit int) (meta []byte) {
170
+ data, err := json.Marshal(d.metadata)
171
+ if err != nil {
172
+ return nil
173
+ }
174
+ if len(data) > limit {
175
+ return nil
176
+ }
177
+ return data
178
+ }
179
+
180
+ // LocalState is used for a TCP Push/Pull. This is sent to
181
+ // the remote side in addition to the membership information. Any
182
+ // data can be sent here. See MergeRemoteState as well. The `join`
183
+ // boolean indicates this is for a join instead of a push/pull.
184
+ func (d *delegate) LocalState(join bool) []byte {
185
+ var (
186
+ info = d.ownInfo()
187
+ err error
188
+ )
189
+
190
+ d.set(d.Name, info) // cache new value
191
+
192
+ x := spaceMsg{
193
+ header{
194
+ OpCode: _OpCodeDisk,
195
+ ProtoVersion: _ProtoVersion,
196
+ },
197
+ info.DiskUsage,
198
+ uint8(len(d.Name)),
199
+ d.Name,
200
+ }
201
+ bytes, err := x.marshal()
202
+ if err != nil {
203
+ d.log.WithField("action", "delegate.local_state.marshal").WithError(err).
204
+ Error("failed to marshal local state")
205
+ return nil
206
+ }
207
+ return bytes
208
+ }
209
+
210
+ // MergeRemoteState is invoked after a TCP Push/Pull. This is the
211
+ // state received from the remote side and is the result of the
212
+ // remote side's LocalState call. The 'join'
213
+ // boolean indicates this is for a join instead of a push/pull.
214
+ func (d *delegate) MergeRemoteState(data []byte, join bool) {
215
+ // Does operation match _OpCodeDisk
216
+ if _OpCode(data[0]) != _OpCodeDisk {
217
+ return
218
+ }
219
+ var x spaceMsg
220
+ if err := x.unmarshal(data); err != nil || x.Node == "" {
221
+ d.log.WithFields(logrus.Fields{
222
+ "action": "delegate.merge_remote.unmarshal",
223
+ "data": string(data),
224
+ }).WithError(err).Error("failed to unmarshal remote state")
225
+ return
226
+ }
227
+ info := NodeInfo{x.DiskUsage, time.Now().UnixMilli()}
228
+ d.set(x.Node, info)
229
+ }
230
+
231
// NotifyMsg is invoked for user-level gossip messages; this delegate does
// not exchange any, so the payload is ignored.
func (d *delegate) NotifyMsg(data []byte) {}

// GetBroadcasts returns user-level gossip messages to broadcast; disk
// usage is exchanged via push/pull state (LocalState), so there are none.
func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte {
	return nil
}
236
+
237
+ // get returns info about about a specific node in the cluster
238
+ func (d *delegate) get(node string) (NodeInfo, bool) {
239
+ d.Lock()
240
+ defer d.Unlock()
241
+ x, ok := d.Cache[node]
242
+ return x, ok
243
+ }
244
+
245
+ func (d *delegate) set(node string, x NodeInfo) {
246
+ d.Lock()
247
+ defer d.Unlock()
248
+ d.Cache[node] = x
249
+ }
250
+
251
+ // delete key from the cache
252
+ func (d *delegate) delete(node string) {
253
+ d.Lock()
254
+ defer d.Unlock()
255
+ delete(d.Cache, node)
256
+ }
257
+
258
// sortCandidates by the amount of free space in descending order
//
// Two nodes are considered equivalent if the difference between their
// free spaces is less than 32MB.
// The free space is just an rough estimate of the actual amount.
// The Lower bound 32MB helps to mitigate the risk of selecting same set of nodes
// when selections happens concurrently on different initiator nodes.
func (d *delegate) sortCandidates(names []string) []string {
	// shuffle first so that nodes within the same 32MB bucket end up in a
	// random relative order (sort.Slice is not a stable sort)
	rand.Shuffle(len(names), func(i, j int) { names[i], names[j] = names[j], names[i] })

	d.Lock()
	defer d.Unlock()
	m := d.Cache
	// >>25 discards the low 25 bits (32MB granularity); comparing j's bucket
	// against i's yields descending order by available space
	sort.Slice(names, func(i, j int) bool {
		return (m[names[j]].Available >> 25) < (m[names[i]].Available >> 25)
	})

	return names
}
277
+
278
// updater a function which updates node information periodically
//
// period is the tick interval; minPeriod is the minimum elapsed time
// between two actual disk measurements, so a fast tick cannot overwhelm
// the disk. du measures usage of d.dataPath. The loop runs until the
// process exits (no cancellation mechanism; see init).
func (d *delegate) updater(period, minPeriod time.Duration, du func(path string) (DiskUsage, error)) {
	t := time.NewTicker(period)
	defer t.Stop()
	curTime := time.Now()
	for range t.C {
		if time.Since(curTime) < minPeriod { // too short
			continue // wait for next cycle to avoid overwhelming the disk
		}
		space, err := du(d.dataPath)
		if err != nil {
			d.log.WithField("action", "delegate.local_state.disk_usage").WithError(err).
				Error("disk space updater failed")
		} else {
			d.setOwnSpace(space)
		}
		// measurement attempted (success or not): restart the minPeriod window
		curTime = time.Now()
	}
}
297
+
298
// events implement memberlist.EventDelegate interface
// EventDelegate is a simpler delegate that is used only to receive
// notifications about members joining and leaving. The methods in this
// delegate may be called by multiple goroutines, but never concurrently.
// This allows you to reason about ordering.
type events struct {
	d *delegate
}

// NotifyJoin is invoked when a node is detected to have joined.
// The Node argument must not be modified.
func (e events) NotifyJoin(*memberlist.Node) {}

// NotifyLeave is invoked when a node is detected to have left.
// The Node argument must not be modified.
// The departed node's disk-usage entry is evicted from the cache.
func (e events) NotifyLeave(node *memberlist.Node) {
	e.d.delete(node.Name)
}

// NotifyUpdate is invoked when a node is detected to have
// updated, usually involving the meta data. The Node argument
// must not be modified.
func (e events) NotifyUpdate(*memberlist.Node) {}
platform/dbops/binaries/weaviate-src/usecases/cluster/delegate_test.go ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "fmt"
16
+ "testing"
17
+ "time"
18
+
19
+ "github.com/hashicorp/memberlist"
20
+ "github.com/pkg/errors"
21
+ "github.com/sirupsen/logrus/hooks/test"
22
+ "github.com/stretchr/testify/assert"
23
+ )
24
+
25
// TestDiskSpaceMarshal round-trips spaceMsg through marshal/unmarshal for
// several node names (including empty), then simulates a peer that
// predates the length prefix to exercise unmarshal's compatibility path.
func TestDiskSpaceMarshal(t *testing.T) {
	for _, name := range []string{"", "host-12:1", "2", "00", "-jhd"} {
		want := spaceMsg{
			header{
				ProtoVersion: uint8(1),
				OpCode:       _OpCode(2),
			},
			DiskUsage{
				Total:     256,
				Available: 3,
			},
			uint8(len(name)),
			name,
		}
		bytes, err := want.marshal()
		assert.Nil(t, err)
		got := spaceMsg{}
		err = got.unmarshal(bytes)
		assert.Nil(t, err)
		assert.Equal(t, want, got)
	}

	// simulate old version
	// NodeLen is deliberately inconsistent ('0' == 48 != len("123")):
	// unmarshal must detect the mismatch and fold the length byte back
	// into the node name, yielding "0123" with length 4
	x := spaceMsg{
		header{
			ProtoVersion: uint8(1),
			OpCode:       _OpCode(2),
		},
		DiskUsage{
			Total:     256,
			Available: 3,
		},
		uint8('0'),
		"123",
	}
	bytes, err := x.marshal()
	want := x
	want.NodeLen = 4
	want.Node = "0123"
	assert.Nil(t, err)
	got := spaceMsg{}
	err = got.unmarshal(bytes)
	assert.Nil(t, err)
	assert.Equal(t, want, got)
}
70
+
71
// TestDelegateGetSet exercises the cache while MergeRemoteState runs on a
// separate goroutine (lock coverage), then re-initializes the delegate and
// round-trips its own LocalState through MergeRemoteState.
func TestDelegateGetSet(t *testing.T) {
	logger, _ := test.NewNullLogger()
	now := time.Now().UnixMilli() - 1
	st := State{
		delegate: delegate{
			Name:     "ABC",
			dataPath: ".",
			log:      logger,
			Cache:    make(map[string]NodeInfo, 32),
		},
	}
	// no-op delegate methods, called for coverage
	st.delegate.NotifyMsg(nil)
	st.delegate.GetBroadcasts(0, 0)
	st.delegate.NodeMeta(0)
	spaces := make([]spaceMsg, 32)
	for i := range spaces {
		node := fmt.Sprintf("N-%d", i+1)
		spaces[i] = spaceMsg{
			header: header{
				OpCode:       _OpCodeDisk,
				ProtoVersion: _ProtoVersion + 2,
			},
			DiskUsage: DiskUsage{
				uint64(i + 1),
				uint64(i),
			},
			Node:    node,
			NodeLen: uint8(len(node)),
		}
	}

	// feed all messages from a second goroutine while reading below
	done := make(chan struct{})
	go func() {
		for _, x := range spaces {
			bytes, _ := x.marshal()
			st.delegate.MergeRemoteState(bytes, false)
		}
		done <- struct{}{}
	}()

	_, ok := st.delegate.get("X")
	assert.False(t, ok)

	// entries may or may not have arrived yet; only check the ones present
	for _, x := range spaces {
		space, ok := st.NodeInfo(x.Node)
		if ok {
			assert.Equal(t, x.DiskUsage, space.DiskUsage)
		}
	}
	<-done
	// after the writer finished, every entry must be present and fresh
	for _, x := range spaces {
		info, ok := st.NodeInfo(x.Node)
		assert.Greater(t, info.LastTimeMilli, now)
		want := NodeInfo{x.DiskUsage, info.LastTimeMilli}
		assert.Equal(t, want, info)
		assert.True(t, ok)
		st.delegate.delete(x.Node)

	}
	assert.Empty(t, st.delegate.Cache)
	st.delegate.init(diskSpace)
	assert.Equal(t, 1, len(st.delegate.Cache))

	st.delegate.MergeRemoteState(st.delegate.LocalState(false), false)
	space, ok := st.NodeInfo(st.delegate.Name)
	assert.True(t, ok)
	assert.Greater(t, space.Total, space.Available)
}
139
+
140
// TestDelegateMergeRemoteState covers three MergeRemoteState paths: a
// valid payload (cached), a truncated payload (ignored), and a valid
// payload carrying an unsupported op code (ignored).
func TestDelegateMergeRemoteState(t *testing.T) {
	logger, _ := test.NewNullLogger()
	var (
		node = "N1"
		d    = delegate{
			Name:     node,
			dataPath: ".",
			log:      logger,
			Cache:    make(map[string]NodeInfo, 32),
		}
		x = spaceMsg{
			header{
				OpCode:       _OpCodeDisk,
				ProtoVersion: _ProtoVersion,
			},
			DiskUsage{2, 1},
			uint8(len(node)),
			node,
		}
	)
	// valid operation payload
	bytes, err := x.marshal()
	assert.Nil(t, err)
	d.MergeRemoteState(bytes, false)
	_, ok := d.get(node)
	assert.True(t, ok)

	node = "N2"
	// invalid payload => expect marshalling error
	d.MergeRemoteState(bytes[:4], false)
	assert.Nil(t, err)
	_, ok = d.get(node)
	assert.False(t, ok)

	// valid payload but operation is not supported
	node = "N2"
	x.header.OpCode = _OpCodeDisk + 2
	bytes, err = x.marshal()
	d.MergeRemoteState(bytes, false)
	assert.Nil(t, err)
	_, ok = d.get(node)
	assert.False(t, ok)
}
183
+
184
// TestDelegateSort verifies descending sort by available space, including
// the 32MB (1<<25) buckets in which nodes count as equivalent and their
// relative order is randomized by sortCandidates' initial shuffle.
func TestDelegateSort(t *testing.T) {
	now := time.Now().UnixMilli()
	GB := uint64(1) << 30
	delegate := delegate{
		Name:     "ABC",
		dataPath: ".",
		Cache:    make(map[string]NodeInfo, 32),
	}

	delegate.set("N1", NodeInfo{DiskUsage{Available: GB}, now})
	delegate.set("N2", NodeInfo{DiskUsage{Available: 3 * GB}, now})
	delegate.set("N3", NodeInfo{DiskUsage{Available: 2 * GB}, now})
	delegate.set("N4", NodeInfo{DiskUsage{Available: 4 * GB}, now})
	// N0 is unknown to the cache (zero space) and must sort last
	got := delegate.sortCandidates([]string{"N1", "N0", "N2", "N4", "N3"})
	assert.Equal(t, []string{"N4", "N2", "N3", "N1", "N0"}, got)

	delegate.set("N1", NodeInfo{DiskUsage{Available: GB - 10}, now})
	// insert equivalent nodes "N2" and "N3"
	delegate.set("N2", NodeInfo{DiskUsage{Available: GB + 128}, now})
	delegate.set("N3", NodeInfo{DiskUsage{Available: GB + 512}, now})
	// one block more
	delegate.set("N4", NodeInfo{DiskUsage{Available: GB + 1<<25}, now})
	got = delegate.sortCandidates([]string{"N1", "N0", "N2", "N3", "N4"})
	// N2 and N3 share a 32MB bucket: either relative order is acceptable
	if got[1] == "N2" {
		assert.Equal(t, []string{"N4", "N2", "N3", "N1", "N0"}, got)
	} else {
		assert.Equal(t, []string{"N4", "N3", "N2", "N1", "N0"}, got)
	}
}
213
+
214
// TestDelegateCleanUp checks that NotifyLeave evicts departed nodes from
// the cache until it is empty (Join/Update notifications are no-ops).
func TestDelegateCleanUp(t *testing.T) {
	st := State{
		delegate: delegate{
			Name:     "N0",
			dataPath: ".",
		},
	}
	diskSpace := func(path string) (DiskUsage, error) {
		return DiskUsage{100, 50}, nil
	}
	st.delegate.init(diskSpace)
	_, ok := st.delegate.get("N0")
	assert.True(t, ok, "N0 must exist")
	st.delegate.set("N1", NodeInfo{LastTimeMilli: 1})
	st.delegate.set("N2", NodeInfo{LastTimeMilli: 2})
	handler := events{&st.delegate}
	handler.NotifyJoin(nil)
	handler.NotifyUpdate(nil)
	handler.NotifyLeave(&memberlist.Node{Name: "N0"})
	handler.NotifyLeave(&memberlist.Node{Name: "N1"})
	handler.NotifyLeave(&memberlist.Node{Name: "N2"})
	assert.Empty(t, st.delegate.Cache)
}
237
+
238
// TestDelegateLocalState checks LocalState both when the initial disk
// measurement fails (cache still seeded with the own entry) and when it
// succeeds (own entry carries the measured usage and a fresh timestamp).
func TestDelegateLocalState(t *testing.T) {
	now := time.Now().UnixMilli() - 1
	errAny := errors.New("any error")
	logger, _ := test.NewNullLogger()

	t.Run("FirstError", func(t *testing.T) {
		d := delegate{
			Name:     "N0",
			dataPath: ".",
			log:      logger,
			Cache:    map[string]NodeInfo{},
		}
		du := func(path string) (DiskUsage, error) { return DiskUsage{}, errAny }
		d.init(du)

		// error reading disk space
		d.LocalState(true)
		assert.Len(t, d.Cache, 1)
	})

	t.Run("Success", func(t *testing.T) {
		d := delegate{
			Name:     "N0",
			dataPath: ".",
			log:      logger,
			Cache:    map[string]NodeInfo{},
		}
		du := func(path string) (DiskUsage, error) { return DiskUsage{5, 1}, nil }
		d.init(du)
		// successful case
		d.LocalState(true)
		got, ok := d.get("N0")
		assert.True(t, ok)
		assert.Greater(t, got.LastTimeMilli, now)
		assert.Equal(t, DiskUsage{5, 1}, got.DiskUsage)
	})
}
275
+
276
// TestDelegateUpdater first rejects a nil disk-usage func, then drives the
// updater on a fast tick with a func that only succeeds on calls 1 and 3;
// after call 4 the cached own entry must hold call 3's measurement.
func TestDelegateUpdater(t *testing.T) {
	logger, _ := test.NewNullLogger()
	now := time.Now().UnixMilli() - 1

	d := delegate{
		Name:     "N0",
		dataPath: ".",
		log:      logger,
		Cache:    map[string]NodeInfo{},
	}
	err := d.init(nil)
	assert.NotNil(t, err)
	doneCh := make(chan bool)
	nCalls := uint64(0)
	du := func(path string) (DiskUsage, error) {
		nCalls++
		if nCalls == 1 || nCalls == 3 {
			return DiskUsage{2 * nCalls, nCalls}, nil
		}
		if nCalls == 2 {
			return DiskUsage{}, fmt.Errorf("any")
		}
		if nCalls == 4 {
			// fourth measurement reached: release the main goroutine
			close(doneCh)
		}
		return DiskUsage{}, fmt.Errorf("any")
	}
	go d.updater(time.Millisecond, 5*time.Millisecond, du)

	<-doneCh

	// error reading disk space
	d.LocalState(true)
	got, ok := d.get("N0")
	assert.True(t, ok)
	assert.Greater(t, got.LastTimeMilli, now)
	assert.Equal(t, DiskUsage{3 * 2, 3}, got.DiskUsage)
}
platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_unix.go ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ //go:build !windows
13
+
14
+ package cluster
15
+
16
+ import (
17
+ "syscall"
18
+ )
19
+
20
+ // diskSpace return the disk space usage
21
+ func diskSpace(path string) (DiskUsage, error) {
22
+ fs := syscall.Statfs_t{}
23
+ err := syscall.Statfs(path, &fs)
24
+ if err != nil {
25
+ return DiskUsage{}, err
26
+ }
27
+ return DiskUsage{
28
+ Total: fs.Blocks * uint64(fs.Bsize),
29
+ Available: fs.Bavail * uint64(fs.Bsize),
30
+ }, nil
31
+ }
platform/dbops/binaries/weaviate-src/usecases/cluster/disk_use_windows.go ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ //go:build windows
13
+
14
+ package cluster
15
+
16
+ import (
17
+ "golang.org/x/sys/windows"
18
+ )
19
+
20
// diskSpace return the disk space usage
//
// Windows implementation using GetDiskFreeSpaceEx. Available reports the
// bytes available to the calling user (per the parameter's meaning in the
// Windows API), which under quotas may be less than the volume's free
// space; totalFreeBytes is queried but unused.
func diskSpace(path string) (DiskUsage, error) {
	var freeBytesAvailableToCaller, totalBytes, totalFreeBytes uint64

	err := windows.GetDiskFreeSpaceEx(
		windows.StringToUTF16Ptr(path),
		&freeBytesAvailableToCaller,
		&totalBytes,
		&totalFreeBytes,
	)
	if err != nil {
		return DiskUsage{}, err
	}

	return DiskUsage{
		Total:     totalBytes,
		Available: freeBytesAvailableToCaller,
	}, nil
}
platform/dbops/binaries/weaviate-src/usecases/cluster/ideal_node_list.go ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "fmt"
16
+ "sort"
17
+ "strings"
18
+ "sync"
19
+ "time"
20
+
21
+ "github.com/sirupsen/logrus"
22
+ enterrors "github.com/weaviate/weaviate/entities/errors"
23
+ )
24
+
25
// IdealClusterState tracks the set of nodes that have ever been seen in
// the cluster. memberNames only grows (see extendList); comparing it to
// the live member list reveals nodes that died or left.
type IdealClusterState struct {
	memberNames  []string     // sorted, append-only list of known members
	currentState MemberLister // source of the currently-live member names
	lock         sync.Mutex   // guards memberNames
}

// NewIdealClusterState creates the state tracker and starts a background
// goroutine that polls the current member list once per second.
func NewIdealClusterState(s MemberLister, logger logrus.FieldLogger) *IdealClusterState {
	ics := &IdealClusterState{currentState: s}
	enterrors.GoWrapper(func() { ics.startPolling() }, logger)
	return ics
}
36
+
37
+ // Validate returns an error if the actual state does not match the assumed
38
+ // ideal state, e.g. because a node has died, or left unexpectedly.
39
+ func (ics *IdealClusterState) Validate() error {
40
+ ics.lock.Lock()
41
+ defer ics.lock.Unlock()
42
+
43
+ actual := map[string]struct{}{}
44
+ for _, name := range ics.currentState.AllNames() {
45
+ actual[name] = struct{}{}
46
+ }
47
+
48
+ var missing []string
49
+ for _, name := range ics.memberNames {
50
+ if _, ok := actual[name]; !ok {
51
+ missing = append(missing, name)
52
+ }
53
+ }
54
+
55
+ if len(missing) > 0 {
56
+ return fmt.Errorf("node(s) %s unhealthy or unavailable",
57
+ strings.Join(missing, ", "))
58
+ }
59
+
60
+ return nil
61
+ }
62
+
63
+ func (ics *IdealClusterState) Members() []string {
64
+ ics.lock.Lock()
65
+ defer ics.lock.Unlock()
66
+
67
+ return ics.memberNames
68
+ }
69
+
70
+ func (ics *IdealClusterState) startPolling() {
71
+ t := time.NewTicker(1 * time.Second)
72
+ for {
73
+ <-t.C
74
+ current := ics.currentState.AllNames()
75
+ ics.extendList(current)
76
+ }
77
+ }
78
+
79
+ func (ics *IdealClusterState) extendList(current []string) {
80
+ ics.lock.Lock()
81
+ defer ics.lock.Unlock()
82
+
83
+ var unknown []string
84
+ known := map[string]struct{}{}
85
+ for _, name := range ics.memberNames {
86
+ known[name] = struct{}{}
87
+ }
88
+
89
+ for _, name := range current {
90
+ if _, ok := known[name]; !ok {
91
+ unknown = append(unknown, name)
92
+ }
93
+ }
94
+
95
+ ics.memberNames = append(ics.memberNames, unknown...)
96
+ sort.Strings(ics.memberNames)
97
+ }
platform/dbops/binaries/weaviate-src/usecases/cluster/iterator.go ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "fmt"
16
+ "math/rand"
17
+ )
18
+
19
// NodeIterationStrategy controls where a NodeIterator begins iterating.
type NodeIterationStrategy int

const (
	// StartRandom begins at a randomly chosen node.
	StartRandom NodeIterationStrategy = iota
	// StartAfter begins at the first node until SetStartNode positions it.
	StartAfter
)

// NodeIterator cycles endlessly through a fixed list of node names.
type NodeIterator struct {
	hostnames []string // the nodes to cycle through
	state     int      // index of the next node to return
}

// HostnameSource provides the full list of node names in the cluster.
type HostnameSource interface {
	AllNames() []string
}

// NewNodeIterator creates an iterator over nodeNames. With StartRandom the
// starting position is chosen at random; with StartAfter it starts at
// index 0 until SetStartNode is called. An empty node list is rejected
// with an error (previously it caused a panic in rand.Intn, and Next
// would panic on the empty slice as well).
func NewNodeIterator(nodeNames []string,
	strategy NodeIterationStrategy,
) (*NodeIterator, error) {
	if strategy != StartRandom && strategy != StartAfter {
		return nil, fmt.Errorf("unsupported strategy: %v", strategy)
	}
	if len(nodeNames) == 0 {
		return nil, fmt.Errorf("node iterator requires at least one node name")
	}

	startState := 0
	if strategy == StartRandom {
		startState = rand.Intn(len(nodeNames))
	}

	return &NodeIterator{
		hostnames: nodeNames,
		state:     startState,
	}, nil
}

// SetStartNode positions the iterator on the node immediately after
// startNode, wrapping around at the end of the list. An unknown name
// leaves the position unchanged.
func (n *NodeIterator) SetStartNode(startNode string) {
	for i, node := range n.hostnames {
		if node == startNode {
			n.state = i + 1
			if n.state == len(n.hostnames) {
				n.state = 0
			}
			break
		}
	}
}

// Next returns the current node name and advances the iterator, wrapping
// around at the end of the list.
func (n *NodeIterator) Next() string {
	curr := n.hostnames[n.state]
	n.state++
	if n.state == len(n.hostnames) {
		n.state = 0
	}

	return curr
}
platform/dbops/binaries/weaviate-src/usecases/cluster/iterator_test.go ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "testing"
16
+
17
+ "github.com/stretchr/testify/assert"
18
+ "github.com/stretchr/testify/require"
19
+ )
20
+
21
// TestNodeIteration checks round-robin fairness: 20 draws over 4 nodes
// must hit every node exactly 5 times regardless of the random start.
func TestNodeIteration(t *testing.T) {
	source := []string{"node1", "node2", "node3", "node4"}
	it, err := NewNodeIterator(source, StartRandom)
	require.Nil(t, err)

	found := map[string]int{}

	for i := 0; i < 20; i++ {
		host := it.Next()
		found[host]++
	}

	// each host must be contained 5 times
	assert.Equal(t, found["node1"], 5)
	assert.Equal(t, found["node2"], 5)
	assert.Equal(t, found["node3"], 5)
	assert.Equal(t, found["node4"], 5)
}
39
+
40
// TestNodeIterationStartAfter checks that iteration resumes at the node
// immediately after the configured start node and wraps around the list.
func TestNodeIterationStartAfter(t *testing.T) {
	source := []string{"node1", "node2", "node3", "node4"}
	it, err := NewNodeIterator(source, StartAfter)
	it.SetStartNode("node2")
	require.Nil(t, err)

	iterations := 3
	found := make([]string, iterations)
	for i := 0; i < iterations; i++ {
		host := it.Next()
		found[i] = host
	}

	expected := []string{"node3", "node4", "node1"}
	assert.Equal(t, expected, found)
}
platform/dbops/binaries/weaviate-src/usecases/cluster/log_workaround.go ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "regexp"
16
+
17
+ "github.com/sirupsen/logrus"
18
+ )
19
+
20
// logParser is an io.Writer adapter that bridges memberlist's plain-text
// log output into structured logrus logging. Memberlist only accepts an
// io.Writer for its logs, so each written line is parsed with a regexp to
// recover its log level.
type logParser struct {
	logrus logrus.FieldLogger // destination structured logger
	regexp *regexp.Regexp     // extracts the "[LEVEL]" tag and the message from a log line
}
24
+
25
+ func newLogParser(logrus logrus.FieldLogger) *logParser {
26
+ return &logParser{
27
+ logrus: logrus,
28
+ regexp: regexp.MustCompile(`(.*)\[(DEBUG|ERR|ERROR|INFO|WARNING|WARN)](.*)`),
29
+ }
30
+ }
31
+
32
+ func (l *logParser) Write(in []byte) (int, error) {
33
+ res := l.regexp.FindSubmatch(in)
34
+ if len(res) != 4 {
35
+ // unable to parse log message
36
+ l.logrus.WithField("in", in).Warn("unable to parse memberlist log message")
37
+ }
38
+
39
+ switch string(res[2]) {
40
+ case "ERR", "ERROR":
41
+ l.logrus.Error(string(res[3]))
42
+ case "WARN", "WARNING":
43
+ l.logrus.Warn(string(res[3]))
44
+ case "DEBUG":
45
+ l.logrus.Debug(string(res[3]))
46
+ case "INFO":
47
+ l.logrus.Info(string(res[3]))
48
+ default:
49
+ l.logrus.WithField("in", in).Warn("unable to parse memberlist log level from message")
50
+ }
51
+
52
+ return len(in), nil
53
+ }
platform/dbops/binaries/weaviate-src/usecases/cluster/mock_node_selector.go ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ // Code generated by mockery v2.53.2. DO NOT EDIT.
13
+
14
+ package cluster
15
+
16
+ import mock "github.com/stretchr/testify/mock"
17
+
18
+ // MockNodeSelector is an autogenerated mock type for the NodeSelector type
19
+ type MockNodeSelector struct {
20
+ mock.Mock
21
+ }
22
+
23
+ type MockNodeSelector_Expecter struct {
24
+ mock *mock.Mock
25
+ }
26
+
27
+ func (_m *MockNodeSelector) EXPECT() *MockNodeSelector_Expecter {
28
+ return &MockNodeSelector_Expecter{mock: &_m.Mock}
29
+ }
30
+
31
+ // AllHostnames provides a mock function with no fields
32
+ func (_m *MockNodeSelector) AllHostnames() []string {
33
+ ret := _m.Called()
34
+
35
+ if len(ret) == 0 {
36
+ panic("no return value specified for AllHostnames")
37
+ }
38
+
39
+ var r0 []string
40
+ if rf, ok := ret.Get(0).(func() []string); ok {
41
+ r0 = rf()
42
+ } else {
43
+ if ret.Get(0) != nil {
44
+ r0 = ret.Get(0).([]string)
45
+ }
46
+ }
47
+
48
+ return r0
49
+ }
50
+
51
+ // MockNodeSelector_AllHostnames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllHostnames'
52
+ type MockNodeSelector_AllHostnames_Call struct {
53
+ *mock.Call
54
+ }
55
+
56
+ // AllHostnames is a helper method to define mock.On call
57
+ func (_e *MockNodeSelector_Expecter) AllHostnames() *MockNodeSelector_AllHostnames_Call {
58
+ return &MockNodeSelector_AllHostnames_Call{Call: _e.mock.On("AllHostnames")}
59
+ }
60
+
61
+ func (_c *MockNodeSelector_AllHostnames_Call) Run(run func()) *MockNodeSelector_AllHostnames_Call {
62
+ _c.Call.Run(func(args mock.Arguments) {
63
+ run()
64
+ })
65
+ return _c
66
+ }
67
+
68
+ func (_c *MockNodeSelector_AllHostnames_Call) Return(_a0 []string) *MockNodeSelector_AllHostnames_Call {
69
+ _c.Call.Return(_a0)
70
+ return _c
71
+ }
72
+
73
+ func (_c *MockNodeSelector_AllHostnames_Call) RunAndReturn(run func() []string) *MockNodeSelector_AllHostnames_Call {
74
+ _c.Call.Return(run)
75
+ return _c
76
+ }
77
+
78
+ // LocalName provides a mock function with no fields
79
+ func (_m *MockNodeSelector) LocalName() string {
80
+ ret := _m.Called()
81
+
82
+ if len(ret) == 0 {
83
+ panic("no return value specified for LocalName")
84
+ }
85
+
86
+ var r0 string
87
+ if rf, ok := ret.Get(0).(func() string); ok {
88
+ r0 = rf()
89
+ } else {
90
+ r0 = ret.Get(0).(string)
91
+ }
92
+
93
+ return r0
94
+ }
95
+
96
+ // MockNodeSelector_LocalName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LocalName'
97
+ type MockNodeSelector_LocalName_Call struct {
98
+ *mock.Call
99
+ }
100
+
101
+ // LocalName is a helper method to define mock.On call
102
+ func (_e *MockNodeSelector_Expecter) LocalName() *MockNodeSelector_LocalName_Call {
103
+ return &MockNodeSelector_LocalName_Call{Call: _e.mock.On("LocalName")}
104
+ }
105
+
106
+ func (_c *MockNodeSelector_LocalName_Call) Run(run func()) *MockNodeSelector_LocalName_Call {
107
+ _c.Call.Run(func(args mock.Arguments) {
108
+ run()
109
+ })
110
+ return _c
111
+ }
112
+
113
+ func (_c *MockNodeSelector_LocalName_Call) Return(_a0 string) *MockNodeSelector_LocalName_Call {
114
+ _c.Call.Return(_a0)
115
+ return _c
116
+ }
117
+
118
+ func (_c *MockNodeSelector_LocalName_Call) RunAndReturn(run func() string) *MockNodeSelector_LocalName_Call {
119
+ _c.Call.Return(run)
120
+ return _c
121
+ }
122
+
123
+ // NodeAddress provides a mock function with given fields: id
124
+ func (_m *MockNodeSelector) NodeAddress(id string) string {
125
+ ret := _m.Called(id)
126
+
127
+ if len(ret) == 0 {
128
+ panic("no return value specified for NodeAddress")
129
+ }
130
+
131
+ var r0 string
132
+ if rf, ok := ret.Get(0).(func(string) string); ok {
133
+ r0 = rf(id)
134
+ } else {
135
+ r0 = ret.Get(0).(string)
136
+ }
137
+
138
+ return r0
139
+ }
140
+
141
+ // MockNodeSelector_NodeAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeAddress'
142
+ type MockNodeSelector_NodeAddress_Call struct {
143
+ *mock.Call
144
+ }
145
+
146
+ // NodeAddress is a helper method to define mock.On call
147
+ // - id string
148
+ func (_e *MockNodeSelector_Expecter) NodeAddress(id interface{}) *MockNodeSelector_NodeAddress_Call {
149
+ return &MockNodeSelector_NodeAddress_Call{Call: _e.mock.On("NodeAddress", id)}
150
+ }
151
+
152
+ func (_c *MockNodeSelector_NodeAddress_Call) Run(run func(id string)) *MockNodeSelector_NodeAddress_Call {
153
+ _c.Call.Run(func(args mock.Arguments) {
154
+ run(args[0].(string))
155
+ })
156
+ return _c
157
+ }
158
+
159
+ func (_c *MockNodeSelector_NodeAddress_Call) Return(_a0 string) *MockNodeSelector_NodeAddress_Call {
160
+ _c.Call.Return(_a0)
161
+ return _c
162
+ }
163
+
164
+ func (_c *MockNodeSelector_NodeAddress_Call) RunAndReturn(run func(string) string) *MockNodeSelector_NodeAddress_Call {
165
+ _c.Call.Return(run)
166
+ return _c
167
+ }
168
+
169
+ // NodeGRPCPort provides a mock function with given fields: id
170
+ func (_m *MockNodeSelector) NodeGRPCPort(id string) (int, error) {
171
+ ret := _m.Called(id)
172
+
173
+ if len(ret) == 0 {
174
+ panic("no return value specified for NodeGRPCPort")
175
+ }
176
+
177
+ var r0 int
178
+ var r1 error
179
+ if rf, ok := ret.Get(0).(func(string) (int, error)); ok {
180
+ return rf(id)
181
+ }
182
+ if rf, ok := ret.Get(0).(func(string) int); ok {
183
+ r0 = rf(id)
184
+ } else {
185
+ r0 = ret.Get(0).(int)
186
+ }
187
+
188
+ if rf, ok := ret.Get(1).(func(string) error); ok {
189
+ r1 = rf(id)
190
+ } else {
191
+ r1 = ret.Error(1)
192
+ }
193
+
194
+ return r0, r1
195
+ }
196
+
197
+ // MockNodeSelector_NodeGRPCPort_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeGRPCPort'
198
+ type MockNodeSelector_NodeGRPCPort_Call struct {
199
+ *mock.Call
200
+ }
201
+
202
+ // NodeGRPCPort is a helper method to define mock.On call
203
+ // - id string
204
+ func (_e *MockNodeSelector_Expecter) NodeGRPCPort(id interface{}) *MockNodeSelector_NodeGRPCPort_Call {
205
+ return &MockNodeSelector_NodeGRPCPort_Call{Call: _e.mock.On("NodeGRPCPort", id)}
206
+ }
207
+
208
+ func (_c *MockNodeSelector_NodeGRPCPort_Call) Run(run func(id string)) *MockNodeSelector_NodeGRPCPort_Call {
209
+ _c.Call.Run(func(args mock.Arguments) {
210
+ run(args[0].(string))
211
+ })
212
+ return _c
213
+ }
214
+
215
+ func (_c *MockNodeSelector_NodeGRPCPort_Call) Return(_a0 int, _a1 error) *MockNodeSelector_NodeGRPCPort_Call {
216
+ _c.Call.Return(_a0, _a1)
217
+ return _c
218
+ }
219
+
220
+ func (_c *MockNodeSelector_NodeGRPCPort_Call) RunAndReturn(run func(string) (int, error)) *MockNodeSelector_NodeGRPCPort_Call {
221
+ _c.Call.Return(run)
222
+ return _c
223
+ }
224
+
225
+ // NodeHostname provides a mock function with given fields: name
226
+ func (_m *MockNodeSelector) NodeHostname(name string) (string, bool) {
227
+ ret := _m.Called(name)
228
+
229
+ if len(ret) == 0 {
230
+ panic("no return value specified for NodeHostname")
231
+ }
232
+
233
+ var r0 string
234
+ var r1 bool
235
+ if rf, ok := ret.Get(0).(func(string) (string, bool)); ok {
236
+ return rf(name)
237
+ }
238
+ if rf, ok := ret.Get(0).(func(string) string); ok {
239
+ r0 = rf(name)
240
+ } else {
241
+ r0 = ret.Get(0).(string)
242
+ }
243
+
244
+ if rf, ok := ret.Get(1).(func(string) bool); ok {
245
+ r1 = rf(name)
246
+ } else {
247
+ r1 = ret.Get(1).(bool)
248
+ }
249
+
250
+ return r0, r1
251
+ }
252
+
253
+ // MockNodeSelector_NodeHostname_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NodeHostname'
254
+ type MockNodeSelector_NodeHostname_Call struct {
255
+ *mock.Call
256
+ }
257
+
258
+ // NodeHostname is a helper method to define mock.On call
259
+ // - name string
260
+ func (_e *MockNodeSelector_Expecter) NodeHostname(name interface{}) *MockNodeSelector_NodeHostname_Call {
261
+ return &MockNodeSelector_NodeHostname_Call{Call: _e.mock.On("NodeHostname", name)}
262
+ }
263
+
264
+ func (_c *MockNodeSelector_NodeHostname_Call) Run(run func(name string)) *MockNodeSelector_NodeHostname_Call {
265
+ _c.Call.Run(func(args mock.Arguments) {
266
+ run(args[0].(string))
267
+ })
268
+ return _c
269
+ }
270
+
271
+ func (_c *MockNodeSelector_NodeHostname_Call) Return(_a0 string, _a1 bool) *MockNodeSelector_NodeHostname_Call {
272
+ _c.Call.Return(_a0, _a1)
273
+ return _c
274
+ }
275
+
276
+ func (_c *MockNodeSelector_NodeHostname_Call) RunAndReturn(run func(string) (string, bool)) *MockNodeSelector_NodeHostname_Call {
277
+ _c.Call.Return(run)
278
+ return _c
279
+ }
280
+
281
+ // NonStorageNodes provides a mock function with no fields
282
+ func (_m *MockNodeSelector) NonStorageNodes() []string {
283
+ ret := _m.Called()
284
+
285
+ if len(ret) == 0 {
286
+ panic("no return value specified for NonStorageNodes")
287
+ }
288
+
289
+ var r0 []string
290
+ if rf, ok := ret.Get(0).(func() []string); ok {
291
+ r0 = rf()
292
+ } else {
293
+ if ret.Get(0) != nil {
294
+ r0 = ret.Get(0).([]string)
295
+ }
296
+ }
297
+
298
+ return r0
299
+ }
300
+
301
+ // MockNodeSelector_NonStorageNodes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NonStorageNodes'
302
+ type MockNodeSelector_NonStorageNodes_Call struct {
303
+ *mock.Call
304
+ }
305
+
306
+ // NonStorageNodes is a helper method to define mock.On call
307
+ func (_e *MockNodeSelector_Expecter) NonStorageNodes() *MockNodeSelector_NonStorageNodes_Call {
308
+ return &MockNodeSelector_NonStorageNodes_Call{Call: _e.mock.On("NonStorageNodes")}
309
+ }
310
+
311
+ func (_c *MockNodeSelector_NonStorageNodes_Call) Run(run func()) *MockNodeSelector_NonStorageNodes_Call {
312
+ _c.Call.Run(func(args mock.Arguments) {
313
+ run()
314
+ })
315
+ return _c
316
+ }
317
+
318
+ func (_c *MockNodeSelector_NonStorageNodes_Call) Return(_a0 []string) *MockNodeSelector_NonStorageNodes_Call {
319
+ _c.Call.Return(_a0)
320
+ return _c
321
+ }
322
+
323
+ func (_c *MockNodeSelector_NonStorageNodes_Call) RunAndReturn(run func() []string) *MockNodeSelector_NonStorageNodes_Call {
324
+ _c.Call.Return(run)
325
+ return _c
326
+ }
327
+
328
+ // SortCandidates provides a mock function with given fields: nodes
329
+ func (_m *MockNodeSelector) SortCandidates(nodes []string) []string {
330
+ ret := _m.Called(nodes)
331
+
332
+ if len(ret) == 0 {
333
+ panic("no return value specified for SortCandidates")
334
+ }
335
+
336
+ var r0 []string
337
+ if rf, ok := ret.Get(0).(func([]string) []string); ok {
338
+ r0 = rf(nodes)
339
+ } else {
340
+ if ret.Get(0) != nil {
341
+ r0 = ret.Get(0).([]string)
342
+ }
343
+ }
344
+
345
+ return r0
346
+ }
347
+
348
+ // MockNodeSelector_SortCandidates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SortCandidates'
349
+ type MockNodeSelector_SortCandidates_Call struct {
350
+ *mock.Call
351
+ }
352
+
353
+ // SortCandidates is a helper method to define mock.On call
354
+ // - nodes []string
355
+ func (_e *MockNodeSelector_Expecter) SortCandidates(nodes interface{}) *MockNodeSelector_SortCandidates_Call {
356
+ return &MockNodeSelector_SortCandidates_Call{Call: _e.mock.On("SortCandidates", nodes)}
357
+ }
358
+
359
+ func (_c *MockNodeSelector_SortCandidates_Call) Run(run func(nodes []string)) *MockNodeSelector_SortCandidates_Call {
360
+ _c.Call.Run(func(args mock.Arguments) {
361
+ run(args[0].([]string))
362
+ })
363
+ return _c
364
+ }
365
+
366
+ func (_c *MockNodeSelector_SortCandidates_Call) Return(_a0 []string) *MockNodeSelector_SortCandidates_Call {
367
+ _c.Call.Return(_a0)
368
+ return _c
369
+ }
370
+
371
+ func (_c *MockNodeSelector_SortCandidates_Call) RunAndReturn(run func([]string) []string) *MockNodeSelector_SortCandidates_Call {
372
+ _c.Call.Return(run)
373
+ return _c
374
+ }
375
+
376
+ // StorageCandidates provides a mock function with no fields
377
+ func (_m *MockNodeSelector) StorageCandidates() []string {
378
+ ret := _m.Called()
379
+
380
+ if len(ret) == 0 {
381
+ panic("no return value specified for StorageCandidates")
382
+ }
383
+
384
+ var r0 []string
385
+ if rf, ok := ret.Get(0).(func() []string); ok {
386
+ r0 = rf()
387
+ } else {
388
+ if ret.Get(0) != nil {
389
+ r0 = ret.Get(0).([]string)
390
+ }
391
+ }
392
+
393
+ return r0
394
+ }
395
+
396
+ // MockNodeSelector_StorageCandidates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StorageCandidates'
397
+ type MockNodeSelector_StorageCandidates_Call struct {
398
+ *mock.Call
399
+ }
400
+
401
+ // StorageCandidates is a helper method to define mock.On call
402
+ func (_e *MockNodeSelector_Expecter) StorageCandidates() *MockNodeSelector_StorageCandidates_Call {
403
+ return &MockNodeSelector_StorageCandidates_Call{Call: _e.mock.On("StorageCandidates")}
404
+ }
405
+
406
+ func (_c *MockNodeSelector_StorageCandidates_Call) Run(run func()) *MockNodeSelector_StorageCandidates_Call {
407
+ _c.Call.Run(func(args mock.Arguments) {
408
+ run()
409
+ })
410
+ return _c
411
+ }
412
+
413
+ func (_c *MockNodeSelector_StorageCandidates_Call) Return(_a0 []string) *MockNodeSelector_StorageCandidates_Call {
414
+ _c.Call.Return(_a0)
415
+ return _c
416
+ }
417
+
418
+ func (_c *MockNodeSelector_StorageCandidates_Call) RunAndReturn(run func() []string) *MockNodeSelector_StorageCandidates_Call {
419
+ _c.Call.Return(run)
420
+ return _c
421
+ }
422
+
423
+ // NewMockNodeSelector creates a new instance of MockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
424
+ // The first argument is typically a *testing.T value.
425
+ func NewMockNodeSelector(t interface {
426
+ mock.TestingT
427
+ Cleanup(func())
428
+ }) *MockNodeSelector {
429
+ mock := &MockNodeSelector{}
430
+ mock.Mock.Test(t)
431
+
432
+ t.Cleanup(func() { mock.AssertExpectations(t) })
433
+
434
+ return mock
435
+ }
platform/dbops/binaries/weaviate-src/usecases/cluster/state.go ADDED
@@ -0,0 +1,460 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "encoding/json"
16
+ "fmt"
17
+ "net"
18
+ "slices"
19
+ "strings"
20
+ "sync"
21
+
22
+ "github.com/hashicorp/memberlist"
23
+ "github.com/pkg/errors"
24
+ "github.com/sirupsen/logrus"
25
+ )
26
+
27
// NodeSelector is an interface to select a portion of the available nodes in memberlist.
type NodeSelector interface {
	// NodeAddress resolves a node id into an ip address without the port.
	NodeAddress(id string) string
	// NodeGRPCPort returns the gRPC port for a specific node id.
	NodeGRPCPort(id string) (int, error)
	// StorageCandidates returns the list of storage node names,
	// sorted by the free amount of disk space in descending order.
	StorageCandidates() []string
	// NonStorageNodes returns the nodes from the member list that are
	// configured as non-storage (voter-only) nodes.
	NonStorageNodes() []string
	// SortCandidates sorts the passed node names by the
	// free amount of disk space in descending order.
	SortCandidates(nodes []string) []string
	// LocalName returns the local node name.
	LocalName() string
	// NodeHostname returns the host address for a specific node name,
	// and false when the node is not a live member.
	NodeHostname(name string) (string, bool)
	// AllHostnames returns the hostnames of all live members, including self.
	AllHostnames() []string
}
48
+
49
// State tracks memberlist-based gossip cluster membership plus node-local
// configuration derived from it.
type State struct {
	config        Config
	localGrpcPort int // gRPC port of the local node; also the fallback for peers without metadata

	// listLock serializes access to the memberlist instance
	listLock        sync.RWMutex
	list            *memberlist.Memberlist
	nonStorageNodes map[string]struct{} // node names configured to hold no data (voter-only)
	delegate        delegate
	// maintenanceNodesLock guards config.MaintenanceNodes, which may be
	// mutated at runtime via SetMaintenanceModeForLocalhost
	maintenanceNodesLock sync.RWMutex
}
60
+
61
// Config holds the user-facing configuration of the gossip-based cluster
// membership (memberlist) layer.
type Config struct {
	// Hostname is the local node name, also used as the memberlist node name.
	Hostname string `json:"hostname" yaml:"hostname"`
	// GossipBindPort is the port memberlist gossip traffic binds to.
	GossipBindPort int `json:"gossipBindPort" yaml:"gossipBindPort"`
	// DataBindPort is the REST/data port advertised to peers via node metadata.
	DataBindPort int `json:"dataBindPort" yaml:"dataBindPort"`
	// Join is a comma-separated list of seed addresses to join on startup.
	Join                    string     `json:"join" yaml:"join"`
	IgnoreStartupSchemaSync bool       `json:"ignoreStartupSchemaSync" yaml:"ignoreStartupSchemaSync"`
	SkipSchemaSyncRepair    bool       `json:"skipSchemaSyncRepair" yaml:"skipSchemaSyncRepair"`
	AuthConfig              AuthConfig `json:"auth" yaml:"auth"`
	// AdvertiseAddr/AdvertisePort override the address memberlist advertises
	// to peers (e.g. behind NAT); empty/zero means use the bind address.
	AdvertiseAddr string `json:"advertiseAddr" yaml:"advertiseAddr"`
	AdvertisePort int    `json:"advertisePort" yaml:"advertisePort"`
	// FastFailureDetection mostly for testing purpose, it will make memberlist sensitive and detect
	// failures (down nodes) faster.
	FastFailureDetection bool `json:"fastFailureDetection" yaml:"fastFailureDetection"`
	// LocalHost flag enables running a multi-node setup with the same localhost and different ports
	Localhost bool `json:"localhost" yaml:"localhost"`
	// MaintenanceNodes is experimental. You should not use this directly, but should use the
	// public methods on the State struct. This is a list of nodes (by Hostname) that are in
	// maintenance mode (eg return a 418 for all data requests). We use a list here instead of a
	// bool because it allows us to set the same config/env vars on all nodes to put a subset of
	// them in maintenance mode. In addition, we may want to have the cluster nodes not in
	// maintenance mode be aware of which nodes are in maintenance mode in the future.
	MaintenanceNodes []string `json:"maintenanceNodes" yaml:"maintenanceNodes"`
	// RaftBootstrapExpect is used to detect split-brain scenarios and attempt to rejoin the cluster
	// TODO-RAFT-DB-63 : shall be removed once NodeAddress() is moved under raft cluster package
	RaftBootstrapExpect int
}
87
+
88
// AuthConfig groups the authentication settings for intra-cluster requests.
type AuthConfig struct {
	BasicAuth BasicAuth `json:"basic" yaml:"basic"`
}
91
+
92
// BasicAuth holds optional username/password credentials for internal
// cluster communication.
type BasicAuth struct {
	Username string `json:"username" yaml:"username"`
	Password string `json:"password" yaml:"password"`
}

// Enabled reports whether basic-auth credentials were configured, i.e.
// whether at least one of username or password is non-empty.
func (ba BasicAuth) Enabled() bool {
	hasUser := ba.Username != ""
	hasPass := ba.Password != ""
	return hasUser || hasPass
}
100
+
101
// Init creates and starts the memberlist-backed cluster state. It wires up
// the gossip delegate (which distributes node metadata such as REST/gRPC
// ports and disk space), creates the memberlist instance, and — when seed
// addresses are configured via userConfig.Join — attempts to join the
// existing cluster.
//
// It returns an error when the memberlist cannot be created or when joining
// a resolvable seed fails. An unresolvable seed hostname is only logged,
// since that is expected for the very first node of a new cluster.
func Init(userConfig Config, grpcPort, raftBootstrapExpect int, dataPath string, nonStorageNodes map[string]struct{}, logger logrus.FieldLogger) (_ *State, err error) {
	// retained for split-brain detection in NodeAddress
	userConfig.RaftBootstrapExpect = raftBootstrapExpect
	cfg := memberlist.DefaultLANConfig()
	cfg.LogOutput = newLogParser(logger) // bridge memberlist's plain-text logs into logrus
	cfg.Name = userConfig.Hostname
	state := State{
		config:          userConfig,
		localGrpcPort:   grpcPort,
		nonStorageNodes: nonStorageNodes,
		delegate: delegate{
			Name:     cfg.Name,
			dataPath: dataPath,
			log:      logger,
			metadata: NodeMetadata{
				RestPort: userConfig.DataBindPort,
				GrpcPort: grpcPort,
			},
		},
	}

	// a failing delegate init is logged but deliberately not fatal: the node
	// can still gossip, only its metadata distribution is degraded
	if err := state.delegate.init(diskSpace); err != nil {
		logger.WithField("action", "init_state.delete_init").WithError(err).
			Error("delegate init failed")
	}
	cfg.Delegate = &state.delegate
	cfg.Events = events{&state.delegate}
	if userConfig.GossipBindPort != 0 {
		cfg.BindPort = userConfig.GossipBindPort
	}

	if userConfig.AdvertiseAddr != "" {
		cfg.AdvertiseAddr = userConfig.AdvertiseAddr
	}

	if userConfig.AdvertisePort != 0 {
		cfg.AdvertisePort = userConfig.AdvertisePort
	}

	if userConfig.FastFailureDetection {
		// aggressive failure detection; intended for testing only
		// (see Config.FastFailureDetection)
		cfg.SuspicionMult = 1
	}

	if state.list, err = memberlist.Create(cfg); err != nil {
		logger.WithFields(logrus.Fields{
			"action":    "memberlist_init",
			"hostname":  userConfig.Hostname,
			"bind_port": userConfig.GossipBindPort,
		}).WithError(err).Error("memberlist not created")
		return nil, errors.Wrap(err, "create member list")
	}
	var joinAddr []string
	if userConfig.Join != "" {
		joinAddr = strings.Split(userConfig.Join, ",")
	}

	if len(joinAddr) > 0 {
		// only the first seed is resolved here, as a reachability probe
		_, err := net.LookupIP(strings.Split(joinAddr[0], ":")[0])
		if err != nil {
			logger.WithFields(logrus.Fields{
				"action":          "cluster_attempt_join",
				"remote_hostname": joinAddr[0],
			}).WithError(err).Warn(
				"specified hostname to join cluster cannot be resolved. This is fine" +
					"if this is the first node of a new cluster, but problematic otherwise.")
		} else {
			_, err := state.list.Join(joinAddr)
			if err != nil {
				logger.WithFields(logrus.Fields{
					"action":          "memberlist_init",
					"remote_hostname": joinAddr,
				}).WithError(err).Error("memberlist join not successful")
				return nil, errors.Wrap(err, "join cluster")
			}
		}
	}

	return &state, nil
}
+
180
+ // Hostnames for all live members, except self. Use AllHostnames to include
181
+ // self, prefixes the data port.
182
+ func (s *State) Hostnames() []string {
183
+ s.listLock.RLock()
184
+ defer s.listLock.RUnlock()
185
+
186
+ mem := s.list.Members()
187
+ out := make([]string, len(mem))
188
+
189
+ i := 0
190
+ for _, m := range mem {
191
+ if m.Name == s.list.LocalNode().Name {
192
+ continue
193
+ }
194
+
195
+ out[i] = fmt.Sprintf("%s:%d", m.Addr.String(), s.dataPort(m))
196
+ i++
197
+ }
198
+
199
+ return out[:i]
200
+ }
201
+
202
+ func nodeMetadata(m *memberlist.Node) (NodeMetadata, error) {
203
+ if len(m.Meta) == 0 {
204
+ return NodeMetadata{}, errors.New("no metadata available")
205
+ }
206
+
207
+ var meta NodeMetadata
208
+ if err := json.Unmarshal(m.Meta, &meta); err != nil {
209
+ return NodeMetadata{}, errors.Wrap(err, "unmarshal node metadata")
210
+ }
211
+
212
+ return meta, nil
213
+ }
214
+
215
// dataPort returns the REST/data port gossiped by the given member. When
// the member carries no parseable metadata (e.g. an older node), it falls
// back to the convention that the data port is one above the gossip port.
func (s *State) dataPort(m *memberlist.Node) int {
	meta, err := nodeMetadata(m)
	if err != nil {
		s.delegate.log.WithFields(logrus.Fields{
			"action": "data_port_fallback",
			"node":   m.Name,
		}).WithError(err).Debug("unable to get node metadata, falling back to default data port")

		return int(m.Port) + 1 // the convention that it's 1 higher than the gossip port
	}

	return meta.RestPort
}
228
+
229
// grpcPort returns the gRPC port gossiped by the given member. When the
// member carries no parseable metadata it falls back to this node's own
// gRPC port — which assumes all peers use the same port; a differing remote
// port would go unnoticed here.
func (s *State) grpcPort(m *memberlist.Node) int {
	meta, err := nodeMetadata(m)
	if err != nil {
		s.delegate.log.WithFields(logrus.Fields{
			"action": "grpc_port_fallback",
			"node":   m.Name,
		}).WithError(err).Debug("unable to get node metadata, falling back to default gRPC port")

		return s.localGrpcPort // fallback to default gRPC port
	}

	return meta.GrpcPort
}
242
+
243
+ // AllHostnames for live members, including self.
244
+ func (s *State) AllHostnames() []string {
245
+ s.listLock.RLock()
246
+ defer s.listLock.RUnlock()
247
+
248
+ if s.list == nil {
249
+ return []string{}
250
+ }
251
+
252
+ mem := s.list.Members()
253
+ out := make([]string, len(mem))
254
+
255
+ for i, m := range mem {
256
+ out[i] = fmt.Sprintf("%s:%d", m.Addr.String(), s.dataPort(m))
257
+ }
258
+
259
+ return out
260
+ }
261
+
262
+ // All node names (not their hostnames!) for live members, including self.
263
+ func (s *State) AllNames() []string {
264
+ s.listLock.RLock()
265
+ defer s.listLock.RUnlock()
266
+
267
+ mem := s.list.Members()
268
+ out := make([]string, len(mem))
269
+
270
+ for i, m := range mem {
271
+ out[i] = m.Name
272
+ }
273
+
274
+ return out
275
+ }
276
+
277
+ // StorageNodes returns all nodes except non storage nodes
278
+ func (s *State) storageNodes() []string {
279
+ if len(s.nonStorageNodes) == 0 {
280
+ return s.AllNames()
281
+ }
282
+
283
+ s.listLock.RLock()
284
+ defer s.listLock.RUnlock()
285
+
286
+ members := s.list.Members()
287
+ out := make([]string, len(members))
288
+ n := 0
289
+ for _, m := range members {
290
+ name := m.Name
291
+ if _, ok := s.nonStorageNodes[name]; !ok {
292
+ out[n] = m.Name
293
+ n++
294
+ }
295
+ }
296
+
297
+ return out[:n]
298
+ }
299
+
300
// StorageCandidates returns the list of storage node names,
// sorted by the free amount of disk space in descending order.
func (s *State) StorageCandidates() []string {
	return s.delegate.sortCandidates(s.storageNodes())
}
305
+
306
+ // NonStorageNodes return nodes from member list which
307
+ // they are configured not to be voter only
308
+ func (s *State) NonStorageNodes() []string {
309
+ nonStorage := []string{}
310
+ for name := range s.nonStorageNodes {
311
+ nonStorage = append(nonStorage, name)
312
+ }
313
+
314
+ return nonStorage
315
+ }
316
+
317
// SortCandidates sorts the passed node names by the
// free amount of disk space in descending order.
func (s *State) SortCandidates(nodes []string) []string {
	return s.delegate.sortCandidates(nodes)
}
322
+
323
// NodeCount returns the number of live members, including self.
// (The previous comment was copied from AllNames and described names, not a count.)
func (s *State) NodeCount() int {
	s.listLock.RLock()
	defer s.listLock.RUnlock()

	return s.list.NumMembers()
}
330
+
331
// LocalName returns the local node name as known to memberlist.
func (s *State) LocalName() string {
	s.listLock.RLock()
	defer s.listLock.RUnlock()

	return s.list.LocalNode().Name
}
338
+
339
// ClusterHealthScore returns memberlist's local health score, where 0 means
// healthy and higher values indicate degraded connectivity to peers.
func (s *State) ClusterHealthScore() int {
	s.listLock.RLock()
	defer s.listLock.RUnlock()

	return s.list.GetHealthScore()
}
345
+
346
+ func (s *State) NodeHostname(nodeName string) (string, bool) {
347
+ s.listLock.RLock()
348
+ defer s.listLock.RUnlock()
349
+
350
+ for _, mem := range s.list.Members() {
351
+ if mem.Name == nodeName {
352
+ return fmt.Sprintf("%s:%d", mem.Addr.String(), s.dataPort(mem)), true
353
+ }
354
+ }
355
+
356
+ return "", false
357
+ }
358
+
359
// NodeAddress is used to resolve the node name into an ip address without the port.
// TODO-RAFT-DB-63 : shall be replaced by Members() which returns members in the list
//
// It returns the empty string when no live member carries the given id. As
// a side effect, when this node finds itself alone in a cluster configured
// to expect more members, it attempts to rejoin via the configured seed
// addresses (single-node split-brain recovery).
func (s *State) NodeAddress(id string) string {
	s.listLock.RLock()
	defer s.listLock.RUnlock()

	// network interruption detection which can cause a single node to be isolated from the cluster (split brain)
	nodeCount := s.list.NumMembers()
	var joinAddr []string
	if s.config.Join != "" {
		joinAddr = strings.Split(s.config.Join, ",")
	}
	if nodeCount == 1 && len(joinAddr) > 0 && s.config.RaftBootstrapExpect > 1 {
		s.delegate.log.WithFields(logrus.Fields{
			"action":     "memberlist_rejoin",
			"node_count": nodeCount,
		}).Warn("detected single node split-brain, attempting to rejoin memberlist cluster")
		// Only attempt rejoin if we're supposed to be part of a larger cluster
		// NOTE(review): Join is called while holding listLock.RLock; if the
		// join triggers delegate/event callbacks that acquire this lock for
		// writing, it could deadlock — confirm against memberlist's callback
		// model.
		_, err := s.list.Join(joinAddr)
		if err != nil {
			s.delegate.log.WithFields(logrus.Fields{
				"action":          "memberlist_rejoin",
				"remote_hostname": joinAddr,
			}).WithError(err).Error("memberlist rejoin not successful")
		} else {
			s.delegate.log.WithFields(logrus.Fields{
				"action":     "memberlist_rejoin",
				"node_count": s.list.NumMembers(),
			}).Info("Successfully rejoined the memberlist cluster")
		}
	}

	for _, mem := range s.list.Members() {
		if mem.Name == id {
			return mem.Addr.String()
		}
	}
	return ""
}
398
+
399
+ func (s *State) NodeGRPCPort(nodeID string) (int, error) {
400
+ for _, mem := range s.list.Members() {
401
+ if mem.Name == nodeID {
402
+ return s.grpcPort(mem), nil
403
+ }
404
+ }
405
+ return 0, fmt.Errorf("node not found: %s", nodeID)
406
+ }
407
+
408
// SchemaSyncIgnored reports whether the startup schema sync is configured
// to be skipped (Config.IgnoreStartupSchemaSync).
func (s *State) SchemaSyncIgnored() bool {
	return s.config.IgnoreStartupSchemaSync
}
411
+
412
// SkipSchemaRepair reports whether the automatic schema sync repair is
// configured to be skipped (Config.SkipSchemaSyncRepair).
func (s *State) SkipSchemaRepair() bool {
	return s.config.SkipSchemaSyncRepair
}
415
+
416
// NodeInfo returns the gossiped info for the given node name, and false
// when the delegate has no entry for it.
func (s *State) NodeInfo(node string) (NodeInfo, bool) {
	return s.delegate.get(node)
}
419
+
420
// MaintenanceModeEnabledForLocalhost is experimental, may be removed/changed. It returns true if this node is in
// maintenance mode (which means it should return an error for all data requests).
func (s *State) MaintenanceModeEnabledForLocalhost() bool {
	return s.nodeInMaintenanceMode(s.config.Hostname)
}
425
+
426
// SetMaintenanceModeForLocalhost is experimental, may be removed/changed. Enables/disables maintenance
// mode for this node (identified by Config.Hostname).
func (s *State) SetMaintenanceModeForLocalhost(enabled bool) {
	s.setMaintenanceModeForNode(s.config.Hostname, enabled)
}
431
+
432
+ func (s *State) setMaintenanceModeForNode(node string, enabled bool) {
433
+ s.maintenanceNodesLock.Lock()
434
+ defer s.maintenanceNodesLock.Unlock()
435
+
436
+ if s.config.MaintenanceNodes == nil {
437
+ s.config.MaintenanceNodes = []string{}
438
+ }
439
+ if !enabled {
440
+ // we're disabling maintenance mode, remove the node from the list
441
+ for i, enabledNode := range s.config.MaintenanceNodes {
442
+ if enabledNode == node {
443
+ s.config.MaintenanceNodes = append(s.config.MaintenanceNodes[:i], s.config.MaintenanceNodes[i+1:]...)
444
+ }
445
+ }
446
+ return
447
+ }
448
+ if !slices.Contains(s.config.MaintenanceNodes, node) {
449
+ // we're enabling maintenance mode, add the node to the list
450
+ s.config.MaintenanceNodes = append(s.config.MaintenanceNodes, node)
451
+ return
452
+ }
453
+ }
454
+
455
// nodeInMaintenanceMode reports whether the given node name is currently on
// the maintenance list. Guarded by maintenanceNodesLock.
func (s *State) nodeInMaintenanceMode(node string) bool {
	s.maintenanceNodesLock.RLock()
	defer s.maintenanceNodesLock.RUnlock()

	return slices.Contains(s.config.MaintenanceNodes, node)
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast.go ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "time"
18
+
19
+ "github.com/sirupsen/logrus"
20
+ enterrors "github.com/weaviate/weaviate/entities/errors"
21
+
22
+ "github.com/pkg/errors"
23
+ )
24
+
25
// TxBroadcaster relays transaction operations (open/abort/commit) from the
// coordinating node to all other nodes in the cluster.
type TxBroadcaster struct {
	state       MemberLister       // source of the current cluster host list
	client      Client             // transport used to reach the other nodes
	consensusFn ConsensusFn        // optional; merges per-node results of read txs
	ideal       *IdealClusterState // validated before txs that don't tolerate node failures
	logger      logrus.FieldLogger
}
32
+
33
// The Broadcaster is the link between the current node and all other nodes
// during a tx operation. This makes it a natural place to inject a consensus
// function for read transactions. How consensus is reached is completely opaque
// to the broadcaster and can be controlled through custom business logic.
type ConsensusFn func(ctx context.Context,
	in []*Transaction) (*Transaction, error)
39
+
40
// Client is the transport used to relay a transaction operation to a single
// remote host.
type Client interface {
	OpenTransaction(ctx context.Context, host string, tx *Transaction) error
	AbortTransaction(ctx context.Context, host string, tx *Transaction) error
	CommitTransaction(ctx context.Context, host string, tx *Transaction) error
}
45
+
46
// MemberLister provides the node names and hostnames of the current cluster
// members.
type MemberLister interface {
	AllNames() []string
	Hostnames() []string
}
50
+
51
// NewTxBroadcaster creates a TxBroadcaster on top of the given member list
// and transport client. It also constructs the IdealClusterState, which is
// later validated by transactions that do not tolerate node failures.
func NewTxBroadcaster(state MemberLister, client Client, logger logrus.FieldLogger) *TxBroadcaster {
	ideal := NewIdealClusterState(state, logger)
	return &TxBroadcaster{
		state:  state,
		client: client,
		ideal:  ideal,
		logger: logger,
	}
}
60
+
61
// SetConsensusFunction injects the function used to merge per-node responses
// of a broadcast transaction into a single result. If none is set,
// BroadcastTransaction leaves the original payload untouched.
func (t *TxBroadcaster) SetConsensusFunction(fn ConsensusFn) {
	t.consensusFn = fn
}
64
+
65
+ func (t *TxBroadcaster) BroadcastTransaction(rootCtx context.Context, tx *Transaction) error {
66
+ if !tx.TolerateNodeFailures {
67
+ if err := t.ideal.Validate(); err != nil {
68
+ return fmt.Errorf("tx does not tolerate node failures: %w", err)
69
+ }
70
+ }
71
+
72
+ hosts := t.state.Hostnames()
73
+ resTx := make([]*Transaction, len(hosts))
74
+ eg := enterrors.NewErrorGroupWrapper(t.logger)
75
+ for i, host := range hosts {
76
+ i := i // https://golang.org/doc/faq#closures_and_goroutines
77
+ host := host // https://golang.org/doc/faq#closures_and_goroutines
78
+
79
+ eg.Go(func() error {
80
+ // make sure we don't block forever if the caller passes in an unlimited
81
+ // context. If another node does not respond within the timeout, consider
82
+ // the tx open attempt failed.
83
+ ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second)
84
+ defer cancel()
85
+
86
+ t.logger.WithFields(logrus.Fields{
87
+ "action": "broadcast_transaction",
88
+ "duration": 30 * time.Second,
89
+ }).Debug("context.WithTimeout")
90
+
91
+ // the client call can mutate the tx, so we need to work with copies to
92
+ // prevent a race and to be able to keep all individual results, so they
93
+ // can be passed to the consensus fn
94
+ resTx[i] = copyTx(tx)
95
+ if err := t.client.OpenTransaction(ctx, host, resTx[i]); err != nil {
96
+ return errors.Wrapf(err, "host %q", host)
97
+ }
98
+
99
+ return nil
100
+ }, host)
101
+ }
102
+
103
+ err := eg.Wait()
104
+ if err != nil {
105
+ return err
106
+ }
107
+
108
+ if t.consensusFn != nil {
109
+ merged, err := t.consensusFn(rootCtx, resTx)
110
+ if err != nil {
111
+ return fmt.Errorf("try to reach consenus: %w", err)
112
+ }
113
+
114
+ if merged != nil {
115
+ tx.Payload = merged.Payload
116
+ }
117
+ }
118
+
119
+ return nil
120
+ }
121
+
122
// BroadcastAbortTransaction tells every host in the cluster to abort the
// given transaction. Per-host errors are wrapped with the host name and
// aggregated into the returned error.
func (t *TxBroadcaster) BroadcastAbortTransaction(rootCtx context.Context, tx *Transaction) error {
	eg := enterrors.NewErrorGroupWrapper(t.logger)
	for _, host := range t.state.Hostnames() {
		host := host // https://golang.org/doc/faq#closures_and_goroutines
		eg.Go(func() error {
			// make sure we don't block forever if the caller passes in an unlimited
			// context. If another node does not respond within the timeout, consider
			// the tx abort attempt failed.
			ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second)
			defer cancel()

			t.logger.WithFields(logrus.Fields{
				"action":   "broadcast_abort_transaction",
				"duration": 30 * time.Second,
			}).Debug("context.WithTimeout")

			if err := t.client.AbortTransaction(ctx, host, tx); err != nil {
				return errors.Wrapf(err, "host %q", host)
			}

			return nil
		}, host)
	}

	return eg.Wait()
}
148
+
149
+ func (t *TxBroadcaster) BroadcastCommitTransaction(rootCtx context.Context, tx *Transaction) error {
150
+ if !tx.TolerateNodeFailures {
151
+ if err := t.ideal.Validate(); err != nil {
152
+ return fmt.Errorf("tx does not tolerate node failures: %w", err)
153
+ }
154
+ }
155
+ eg := enterrors.NewErrorGroupWrapper(t.logger)
156
+ for _, host := range t.state.Hostnames() {
157
+ // make sure we don't block forever if the caller passes in an unlimited
158
+ // context. If another node does not respond within the timeout, consider
159
+ // the tx commit attempt failed.
160
+ ctx, cancel := context.WithTimeout(rootCtx, 30*time.Second)
161
+ defer cancel()
162
+
163
+ t.logger.WithFields(logrus.Fields{
164
+ "action": "broadcast_commit_transaction",
165
+ "duration": 30 * time.Second,
166
+ }).Debug("context.WithTimeout")
167
+
168
+ host := host // https://golang.org/doc/faq#closures_and_goroutines
169
+ eg.Go(func() error {
170
+ if err := t.client.CommitTransaction(ctx, host, tx); err != nil {
171
+ return errors.Wrapf(err, "host %q", host)
172
+ }
173
+
174
+ return nil
175
+ }, host)
176
+ }
177
+
178
+ return eg.Wait()
179
+ }
180
+
181
// copyTx returns a shallow copy of the given transaction, so each per-host
// request can mutate its own copy without racing on the original.
func copyTx(in *Transaction) *Transaction {
	out := *in
	return &out
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_broadcast_test.go ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "strings"
18
+ "sync"
19
+ "testing"
20
+ "time"
21
+
22
+ "github.com/sirupsen/logrus/hooks/test"
23
+
24
+ "github.com/stretchr/testify/assert"
25
+ "github.com/stretchr/testify/require"
26
+ )
27
+
28
// logger is a shared no-op logger for all tests in this package.
var logger, _ = test.NewNullLogger()
29
+
30
// TestBroadcastOpenTransaction verifies that opening a transaction is
// broadcast to every host in the cluster.
func TestBroadcastOpenTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.openCalled)
}
43
+
44
// TestBroadcastOpenTransactionWithReturnPayload verifies that the consensus
// function receives every host's individual response and that the merged
// payload is written back into the original tx.
func TestBroadcastOpenTransactionWithReturnPayload(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)
	bc.SetConsensusFunction(func(ctx context.Context,
		in []*Transaction,
	) (*Transaction, error) {
		// instead of actually reaching a consensus this test mock simply merged
		// all the individual results. For testing purposes this is even better
		// because now we can be sure that every element was considered.
		merged := ""
		for _, tx := range in {
			if len(merged) > 0 {
				merged += ","
			}
			merged += tx.Payload.(string)
		}

		return &Transaction{
			Payload: merged,
		}, nil
	})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.openCalled)

	results := strings.Split(tx.Payload.(string), ",")
	assert.ElementsMatch(t, []string{
		"hello_from_host1",
		"hello_from_host2",
		"hello_from_host3",
	}, results)
}
82
+
83
// TestBroadcastOpenTransactionAfterNodeHasDied verifies that a (node-failure
// intolerant) tx open is rejected up front when the live host list no longer
// matches the ideal cluster state.
func TestBroadcastOpenTransactionAfterNodeHasDied(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}
	bc := NewTxBroadcaster(state, client, logger)

	waitUntilIdealStateHasReached(t, bc, 3, 4*time.Second)

	// host2 is dead
	state.updateHosts([]string{"host1", "host3"})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastTransaction(context.Background(), tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "host2")

	// no node should have received an open
	assert.ElementsMatch(t, []string{}, client.openCalled)
}
102
+
103
// waitUntilIdealStateHasReached polls the broadcaster's ideal cluster state
// every 250ms until it contains exactly goal members, failing the test if
// that does not happen within max.
func waitUntilIdealStateHasReached(t *testing.T, bc *TxBroadcaster, goal int,
	max time.Duration,
) {
	ctx, cancel := context.WithTimeout(context.Background(), max)
	defer cancel()

	interval := time.NewTicker(250 * time.Millisecond)
	defer interval.Stop()

	for {
		select {
		case <-ctx.Done():
			t.Error(fmt.Errorf("waiting to reach state goal %d: %w", goal, ctx.Err()))
			return
		case <-interval.C:
			if len(bc.ideal.Members()) == goal {
				return
			}
		}
	}
}
124
+
125
// TestBroadcastAbortTransaction verifies that an abort is broadcast to every
// host in the cluster.
func TestBroadcastAbortTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastAbortTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.abortCalled)
}
138
+
139
// TestBroadcastCommitTransaction verifies that a commit is broadcast to every
// host in the cluster.
func TestBroadcastCommitTransaction(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}

	bc := NewTxBroadcaster(state, client, logger)

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastCommitTransaction(context.Background(), tx)
	require.Nil(t, err)

	assert.ElementsMatch(t, []string{"host1", "host2", "host3"}, client.commitCalled)
}
152
+
153
// TestBroadcastCommitTransactionAfterNodeHasDied verifies that a commit is
// rejected up front — with no host receiving it — when the live host list no
// longer matches the ideal cluster state.
func TestBroadcastCommitTransactionAfterNodeHasDied(t *testing.T) {
	client := &fakeClient{}
	state := &fakeState{hosts: []string{"host1", "host2", "host3"}}
	bc := NewTxBroadcaster(state, client, logger)

	waitUntilIdealStateHasReached(t, bc, 3, 4*time.Second)

	state.updateHosts([]string{"host1", "host3"})

	tx := &Transaction{ID: "foo"}

	err := bc.BroadcastCommitTransaction(context.Background(), tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "host2")

	// no node should have received the commit
	assert.ElementsMatch(t, []string{}, client.commitCalled)
}
171
+
172
// fakeState is a MemberLister test double backed by a mutable host list.
type fakeState struct {
	hosts []string
	sync.Mutex
}

// updateHosts swaps in a new host list, e.g. to simulate a node dying.
func (f *fakeState) updateHosts(newHosts []string) {
	f.Lock()
	f.hosts = newHosts
	f.Unlock()
}

// Hostnames returns the current host list.
func (f *fakeState) Hostnames() []string {
	f.Lock()
	defer f.Unlock()
	return f.hosts
}

// AllNames returns the current host list; in this fake, node names and
// hostnames are identical.
func (f *fakeState) AllNames() []string {
	return f.Hostnames()
}
197
+
198
// fakeClient is a Client test double that records which hosts each
// transaction operation was sent to.
type fakeClient struct {
	sync.Mutex
	openCalled   []string // hosts that received OpenTransaction
	abortCalled  []string // hosts that received AbortTransaction
	commitCalled []string // hosts that received CommitTransaction
}

// OpenTransaction records the host and mutates the tx payload so tests can
// observe per-host responses flowing into the consensus function.
func (f *fakeClient) OpenTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.openCalled = append(f.openCalled, host)
	tx.Payload = "hello_from_" + host
	return nil
}

// AbortTransaction records the host; always succeeds.
func (f *fakeClient) AbortTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.abortCalled = append(f.abortCalled, host)
	return nil
}

// CommitTransaction records the host; always succeeds.
func (f *fakeClient) CommitTransaction(ctx context.Context, host string, tx *Transaction) error {
	f.Lock()
	defer f.Unlock()

	f.commitCalled = append(f.commitCalled, host)
	return nil
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_read.go ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "context"
16
+ "time"
17
+
18
+ "github.com/pkg/errors"
19
+ "github.com/prometheus/client_golang/prometheus"
20
+ "github.com/sirupsen/logrus"
21
+ "github.com/weaviate/weaviate/usecases/monitoring"
22
+ )
23
+
24
// CloseReadTransaction ends a read-only transaction coordinated by this node:
// it broadcasts a commit to all participants and, if that fails, broadcasts
// an abort as a best effort. Whatever the outcome, the local transaction slot
// is cleared and close metrics are recorded afterwards (via the defer).
//
// Returns ErrInvalidTransaction if tx is not the currently open transaction.
func (c *TxManager) CloseReadTransaction(ctx context.Context,
	tx *Transaction,
) error {
	c.Lock()
	if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID {
		c.Unlock()
		return ErrInvalidTransaction
	}

	c.Unlock()
	c.slowLog.Update("close_read_started")

	// now that we know we are dealing with a valid transaction: no matter the
	// outcome, after this call, we should not have a local transaction anymore
	defer func() {
		c.Lock()
		c.currentTransaction = nil
		monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "close_read",
		}).Inc()
		// currentTransactionBegin is read under the same lock that guards it
		took := time.Since(c.currentTransactionBegin)
		monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "close_read",
		}).Observe(took.Seconds())
		c.slowLog.Close("closed_read")
		c.Unlock()
	}()

	if err := c.remote.BroadcastCommitTransaction(ctx, tx); err != nil {
		// we could not open the transaction on every node, therefore we need to
		// abort it everywhere.

		if err := c.remote.BroadcastAbortTransaction(ctx, tx); err != nil {
			c.logger.WithFields(logrus.Fields{
				"action": "broadcast_abort_read_transaction",
				"id":     tx.ID,
			}).WithError(err).Error("broadcast tx (read-only) abort failed")
		}

		return errors.Wrap(err, "broadcast commit read transaction")
	}

	return nil
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_slowlog.go ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "os"
16
+ "strconv"
17
+ "sync"
18
+ "time"
19
+
20
+ "github.com/sirupsen/logrus"
21
+ "github.com/weaviate/weaviate/entities/errors"
22
+ )
23
+
24
+ func newTxSlowLog(logger logrus.FieldLogger) *txSlowLog {
25
+ ageThreshold := 5 * time.Second
26
+ changeThreshold := 1 * time.Second
27
+
28
+ if age := os.Getenv("TX_SLOW_LOG_AGE_THRESHOLD_SECONDS"); age != "" {
29
+ ageParsed, err := strconv.Atoi(age)
30
+ if err == nil {
31
+ ageThreshold = time.Duration(ageParsed) * time.Second
32
+ }
33
+ }
34
+
35
+ if change := os.Getenv("TX_SLOW_LOG_CHANGE_THRESHOLD_SECONDS"); change != "" {
36
+ changeParsed, err := strconv.Atoi(change)
37
+ if err == nil {
38
+ changeThreshold = time.Duration(changeParsed) * time.Second
39
+ }
40
+ }
41
+
42
+ return &txSlowLog{
43
+ logger: logger,
44
+ ageThreshold: ageThreshold,
45
+ changeThreshold: changeThreshold,
46
+ }
47
+ }
48
+
49
// txSlowLog is meant as a temporary debugging tool for the v1 schema. When the
// v2 schema is ready, this can be thrown away.
type txSlowLog struct {
	sync.Mutex
	logger logrus.FieldLogger

	// tx-specific
	id           string    // id of the transaction currently tracked
	status       string    // last status set via Start/Update/Close
	begin        time.Time // when tracking started
	lastChange   time.Time // when the status last changed
	writable     bool      // whether the tracked tx is writable
	coordinating bool      // whether this node coordinates the tracked tx
	txPresent    bool      // whether a tx is currently being tracked
	logged       bool      // whether the tracked tx was already logged as slow

	// config
	ageThreshold    time.Duration // log once total tx age exceeds this
	changeThreshold time.Duration // log once time in same status exceeds this
}
69
+
70
// Start begins tracking a new transaction, resetting all per-tx state
// (begin/lastChange clocks, logged flag) for the watcher goroutine.
func (txsl *txSlowLog) Start(id string, coordinating bool,
	writable bool,
) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.id = id
	txsl.status = "opened"
	now := time.Now()
	txsl.begin = now
	txsl.lastChange = now
	txsl.coordinating = coordinating
	txsl.writable = writable
	txsl.txPresent = true
	txsl.logged = false
}
86
+
87
// Update records a status change for the tracked transaction and resets the
// "time since last change" clock used by the slow-log watcher.
func (txsl *txSlowLog) Update(status string) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.status = status
	txsl.lastChange = time.Now()
}
94
+
95
// Close records the final status of the tracked transaction and, when the tx
// was slow overall or had already been flagged while in progress, logs its
// completion. Afterwards the slow log is ready for the next transaction.
func (txsl *txSlowLog) Close(status string) {
	txsl.Lock()
	defer txsl.Unlock()

	txsl.status = status
	txsl.lastChange = time.Now()

	// there are two situations where we need to log the end of the transaction:
	//
	// 1. if it is slower than the age threshold
	//
	// 2. if we have logged it before (e.g. because it was in a specific state
	// longer than expected)

	if txsl.lastChange.Sub(txsl.begin) >= txsl.ageThreshold || txsl.logged {
		txsl.logger.WithFields(logrus.Fields{
			"action":         "transaction_slow_log",
			"event":          "tx_closed",
			"status":         txsl.status,
			"total_duration": txsl.lastChange.Sub(txsl.begin),
			"tx_id":          txsl.id,
			"coordinating":   txsl.coordinating,
			"writable":       txsl.writable,
		}).Infof("slow transaction completed")
	}

	// reset for next usage
	txsl.txPresent = false
}
124
+
125
// StartWatching spawns a background goroutine that checks every 500ms whether
// the currently tracked transaction should be reported as slow. The goroutine
// runs for the lifetime of the process; there is no stop mechanism.
func (txsl *txSlowLog) StartWatching() {
	t := time.Tick(500 * time.Millisecond)
	errors.GoWrapper(func() {
		for {
			<-t
			txsl.log()
		}
	}, txsl.logger)
}
134
+
135
// log is called periodically by the StartWatching goroutine. It emits an
// in-progress slow-tx entry when the tracked transaction has exceeded the
// total age threshold or has sat in the same status longer than the change
// threshold, and marks it as logged so Close will also report completion.
func (txsl *txSlowLog) log() {
	txsl.Lock()
	defer txsl.Unlock()

	if !txsl.txPresent {
		return
	}

	now := time.Now()
	age := now.Sub(txsl.begin)
	changed := now.Sub(txsl.lastChange)

	if age >= txsl.ageThreshold || changed >= txsl.changeThreshold {
		txsl.logger.WithFields(logrus.Fields{
			"action":            "transaction_slow_log",
			"event":             "tx_in_progress",
			"status":            txsl.status,
			"total_duration":    age,
			"since_last_change": changed,
			"tx_id":             txsl.id,
			"coordinating":      txsl.coordinating,
			"writable":          txsl.writable,
		}).Infof("slow transaction in progress")

		txsl.logged = true
	}
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_test.go ADDED
@@ -0,0 +1,570 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "sync"
18
+ "testing"
19
+ "time"
20
+
21
+ "github.com/sirupsen/logrus/hooks/test"
22
+ "github.com/stretchr/testify/assert"
23
+ "github.com/stretchr/testify/require"
24
+ )
25
+
26
// TestSuccessfulOutgoingWriteTransaction covers the happy path: begin a write
// transaction and commit it without errors.
func TestSuccessfulOutgoingWriteTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = man.CommitWriteTransaction(ctx, tx)
	require.Nil(t, err)
}
39
+
40
// TestTryingToOpenTwoTransactions verifies that a second BeginTransaction is
// rejected while one is open, and that the first can still be committed.
func TestTryingToOpenTwoTransactions(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	tx2, err := man.BeginTransaction(ctx, trType, payload, 0)
	assert.Nil(t, tx2)
	require.NotNil(t, err)
	assert.Equal(t, "concurrent transaction", err.Error())

	err = man.CommitWriteTransaction(ctx, tx1)
	assert.Nil(t, err, "original transaction can still be committed")
}
58
+
59
// TestTryingToCommitInvalidTransaction verifies that committing a tx whose ID
// does not match the open one fails, without affecting the open tx.
func TestTryingToCommitInvalidTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	invalidTx := &Transaction{ID: "invalid"}

	err = man.CommitWriteTransaction(ctx, invalidTx)
	require.NotNil(t, err)
	assert.Equal(t, "invalid transaction", err.Error())

	err = man.CommitWriteTransaction(ctx, tx1)
	assert.Nil(t, err, "original transaction can still be committed")
}
78
+
79
// TestTryingToCommitTransactionPastTTL verifies that a tx opened with a tiny
// TTL can no longer be committed after it expires, and that a new tx can be
// opened afterwards.
func TestTryingToCommitTransactionPastTTL(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, time.Microsecond)
	require.Nil(t, err)

	expiredTx := &Transaction{ID: tx1.ID}

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	err = man.CommitWriteTransaction(ctx, expiredTx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")

	// make sure it is possible to open future transactions
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)
}
102
+
103
// TestTryingToCommitIncomingTransactionPastTTL is the participant-side
// equivalent of the TTL test: an incoming tx whose deadline has passed cannot
// be committed, and the manager stays usable afterwards.
func TestTryingToCommitIncomingTransactionPastTTL(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	dl := time.Now().Add(1 * time.Microsecond)

	tx := &Transaction{
		ID:       "123456",
		Type:     trType,
		Payload:  payload,
		Deadline: dl,
	}

	man.IncomingBeginTransaction(context.Background(), tx)

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	err := man.IncomingCommitTransaction(ctx, tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")

	// make sure it is possible to open future transactions
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)
}
132
+
133
// TestLettingATransactionExpire verifies that once a tx expires, a new one
// can be opened and referring to the old tx yields a TTL error rather than a
// concurrent-transaction error.
func TestLettingATransactionExpire(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()

	man := newTestTxManager()

	tx1, err := man.BeginTransaction(ctx, trType, payload, time.Microsecond)
	require.Nil(t, err)

	// give the cancel handler some time to run
	time.Sleep(50 * time.Millisecond)

	// try to open a new one
	_, err = man.BeginTransaction(context.Background(), trType, payload, 0)
	require.Nil(t, err)

	// since the old one expired, we now expect a TTL error instead of a
	// concurrent tx error when trying to refer to the old one
	err = man.CommitWriteTransaction(context.Background(), tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "transaction TTL")
}
156
+
157
// TestRemoteDoesntAllowOpeningTransaction verifies that when the broadcast
// open is rejected remotely, the local begin fails and the tx is aborted
// everywhere.
func TestRemoteDoesntAllowOpeningTransaction(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()
	broadcaster := &fakeBroadcaster{
		openErr: ErrConcurrentTransaction,
	}

	man := newTestTxManagerWithRemote(broadcaster)

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "open transaction")

	assert.Len(t, broadcaster.abortCalledId, 36, "a valid uuid was aborted")
}
174
+
175
// TestRemoteDoesntAllowOpeningTransactionAbortFails additionally makes the
// abort broadcast fail and asserts that the failure is logged (but does not
// mask the original open error).
func TestRemoteDoesntAllowOpeningTransactionAbortFails(t *testing.T) {
	payload := "my-payload"
	trType := TransactionType("my-type")
	ctx := context.Background()
	broadcaster := &fakeBroadcaster{
		openErr:  ErrConcurrentTransaction,
		abortErr: fmt.Errorf("cannot abort"),
	}

	man, hook := newTestTxManagerWithRemoteLoggerHook(broadcaster)

	tx1, err := man.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, tx1)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "open transaction")

	assert.Len(t, broadcaster.abortCalledId, 36, "a valid uuid was aborted")

	require.Len(t, hook.Entries, 1)
	assert.Equal(t, "broadcast tx abort failed", hook.Entries[0].Message)
}
196
+
197
// fakeBroadcaster is a broadcaster test double with scriptable errors for
// open/commit/abort; it records the ID of the last aborted transaction.
type fakeBroadcaster struct {
	openErr       error
	commitErr     error
	abortErr      error
	abortCalledId string
}

// BroadcastTransaction returns the scripted open error (nil on success).
func (f *fakeBroadcaster) BroadcastTransaction(ctx context.Context,
	tx *Transaction,
) error {
	return f.openErr
}

// BroadcastAbortTransaction records the aborted tx ID and returns the
// scripted abort error.
func (f *fakeBroadcaster) BroadcastAbortTransaction(ctx context.Context,
	tx *Transaction,
) error {
	f.abortCalledId = tx.ID
	return f.abortErr
}

// BroadcastCommitTransaction returns the scripted commit error.
func (f *fakeBroadcaster) BroadcastCommitTransaction(ctx context.Context,
	tx *Transaction,
) error {
	return f.commitErr
}
222
+
223
// TestSuccessfulDistributedWriteTransaction wires a local manager to a remote
// one and verifies that a committed write tx applies its payload remotely.
func TestSuccessfulDistributedWriteTransaction(t *testing.T) {
	ctx := context.Background()

	var remoteState interface{}
	remote := newTestTxManager()
	remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
		remoteState = tx.Payload
		return nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)
	local.StartAcceptIncoming()

	payload := "my-payload"
	trType := TransactionType("my-type")

	tx, err := local.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, err)

	err = local.CommitWriteTransaction(ctx, tx)
	require.Nil(t, err)

	assert.Equal(t, "my-payload", remoteState)
}
247
+
248
+ // based on https://github.com/weaviate/weaviate/issues/4637
249
+ func TestDistributedWriteTransactionWithRemoteCommitFailure(t *testing.T) {
250
+ ctx := context.Background()
251
+
252
+ var remoteState interface{}
253
+ remote := newTestTxManager()
254
+ remoteShoudError := true
255
+ remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
256
+ if remoteShoudError {
257
+ return fmt.Errorf("could not commit")
258
+ }
259
+
260
+ remoteState = tx.Payload
261
+ return nil
262
+ })
263
+ local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
264
+ &fakeTxPersistence{}, remote.logger)
265
+ local.StartAcceptIncoming()
266
+
267
+ payload := "my-payload"
268
+ trType := TransactionType("my-type")
269
+
270
+ tx, err := local.BeginTransaction(ctx, trType, payload, 0)
271
+ require.Nil(t, err)
272
+
273
+ err = local.CommitWriteTransaction(ctx, tx)
274
+ // expected that the commit fails if a remote node can't commit
275
+ assert.NotNil(t, err)
276
+
277
+ remoteShoudError = false
278
+
279
+ // now try again and assert that everything works fine Prior to
280
+ // https://github.com/weaviate/weaviate/issues/4637 we would now get
281
+ // concurrent tx errors
282
+
283
+ payload = "my-updated-payload"
284
+ newTx, err := local.BeginTransaction(ctx, trType, payload, 0)
285
+ require.Nil(t, err)
286
+
287
+ err = local.CommitWriteTransaction(ctx, newTx)
288
+ require.Nil(t, err)
289
+
290
+ assert.Equal(t, "my-updated-payload", remoteState)
291
+ }
292
+
293
// TestConcurrentDistributedTransaction verifies that a local begin fails with
// a concurrent-transaction error when the remote already has one open, and
// that the remote state stays untouched.
func TestConcurrentDistributedTransaction(t *testing.T) {
	ctx := context.Background()

	var remoteState interface{}
	remote := newTestTxManager()
	remote.SetCommitFn(func(ctx context.Context, tx *Transaction) error {
		remoteState = tx.Payload
		return nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)

	payload := "my-payload"
	trType := TransactionType("my-type")

	// open a transaction on the remote to simulate a concurrent transaction.
	// Since it uses the fakeBroadcaster it does not tell anyone about it, this
	// way we can be sure that the reason for failure is actually a concurrent
	// transaction on the remote side, not on the local side. Compare this to a
	// situation where broadcasting was bi-directional: Then this transaction
	// would have been opened successfully and already be replicated to the
	// "local" tx manager. So the next call on "local" would also fail, but for
	// the wrong reason: It would fail because another transaction is already in
	// place. We, however want to simulate a situation where due to network
	// delays, etc. both sides try to open a transaction more or less in
	// parallel.
	_, err := remote.BeginTransaction(ctx, trType, "wrong payload", 0)
	require.Nil(t, err)

	tx, err := local.BeginTransaction(ctx, trType, payload, 0)
	require.Nil(t, tx)
	require.NotNil(t, err)
	assert.Contains(t, err.Error(), "concurrent transaction")

	assert.Equal(t, nil, remoteState, "remote state should not have been updated")
}
329
+
330
+ // This test simulates three nodes trying to open a tx at basically the same
331
+ // time with the simulated network being so slow that other nodes will try to
332
+ // open their own transactions before they receive the incoming tx. This is a
333
+ // situation where everyone thinks they were the first to open the tx and there
334
+ // is no clear winner. All attempts must fail!
335
+ func TestConcurrentOpenAttemptsOnSlowNetwork(t *testing.T) {
336
+ ctx := context.Background()
337
+
338
+ broadcaster := &slowMultiBroadcaster{delay: 100 * time.Millisecond}
339
+ node1 := newTestTxManagerWithRemote(broadcaster)
340
+ node2 := newTestTxManagerWithRemote(broadcaster)
341
+ node3 := newTestTxManagerWithRemote(broadcaster)
342
+
343
+ broadcaster.nodes = []*TxManager{node1, node2, node3}
344
+
345
+ trType := TransactionType("my-type")
346
+
347
+ wg := &sync.WaitGroup{}
348
+ wg.Add(1)
349
+ go func() {
350
+ defer wg.Done()
351
+ _, err := node1.BeginTransaction(ctx, trType, "payload-from-node-1", 0)
352
+ assert.NotNil(t, err, "open tx 1 must fail")
353
+ }()
354
+
355
+ wg.Add(1)
356
+ go func() {
357
+ defer wg.Done()
358
+ _, err := node2.BeginTransaction(ctx, trType, "payload-from-node-2", 0)
359
+ assert.NotNil(t, err, "open tx 2 must fail")
360
+ }()
361
+
362
+ wg.Add(1)
363
+ go func() {
364
+ defer wg.Done()
365
+ _, err := node3.BeginTransaction(ctx, trType, "payload-from-node-3", 0)
366
+ assert.NotNil(t, err, "open tx 3 must fail")
367
+ }()
368
+
369
+ wg.Wait()
370
+ }
371
+
372
// wrapTxManagerAsBroadcaster adapts a single remote TxManager to the Remote
// (broadcaster) interface, so a "local" manager can talk to exactly one
// "remote" manager in tests without real networking.
type wrapTxManagerAsBroadcaster struct {
	txManager *TxManager
}
375
+
376
+ func (w *wrapTxManagerAsBroadcaster) BroadcastTransaction(ctx context.Context,
377
+ tx *Transaction,
378
+ ) error {
379
+ _, err := w.txManager.IncomingBeginTransaction(ctx, tx)
380
+ return err
381
+ }
382
+
383
+ func (w *wrapTxManagerAsBroadcaster) BroadcastAbortTransaction(ctx context.Context,
384
+ tx *Transaction,
385
+ ) error {
386
+ w.txManager.IncomingAbortTransaction(ctx, tx)
387
+ return nil
388
+ }
389
+
390
+ func (w *wrapTxManagerAsBroadcaster) BroadcastCommitTransaction(ctx context.Context,
391
+ tx *Transaction,
392
+ ) error {
393
+ return w.txManager.IncomingCommitTransaction(ctx, tx)
394
+ }
395
+
396
// slowMultiBroadcaster fans broadcasts out to a fixed set of TxManagers, but
// sleeps for `delay` first to simulate a network slow enough that nodes act
// before they see each other's messages.
type slowMultiBroadcaster struct {
	delay time.Duration
	nodes []*TxManager
}
400
+
401
+ func (b *slowMultiBroadcaster) BroadcastTransaction(ctx context.Context,
402
+ tx *Transaction,
403
+ ) error {
404
+ time.Sleep(b.delay)
405
+ for _, node := range b.nodes {
406
+ if _, err := node.IncomingBeginTransaction(ctx, tx); err != nil {
407
+ return err
408
+ }
409
+ }
410
+ return nil
411
+ }
412
+
413
+ func (b *slowMultiBroadcaster) BroadcastAbortTransaction(ctx context.Context,
414
+ tx *Transaction,
415
+ ) error {
416
+ time.Sleep(b.delay)
417
+ for _, node := range b.nodes {
418
+ node.IncomingAbortTransaction(ctx, tx)
419
+ }
420
+
421
+ return nil
422
+ }
423
+
424
+ func (b *slowMultiBroadcaster) BroadcastCommitTransaction(ctx context.Context,
425
+ tx *Transaction,
426
+ ) error {
427
+ time.Sleep(b.delay)
428
+ for _, node := range b.nodes {
429
+ if err := node.IncomingCommitTransaction(ctx, tx); err != nil {
430
+ return err
431
+ }
432
+ }
433
+
434
+ return nil
435
+ }
436
+
437
// TestSuccessfulDistributedReadTransaction verifies that a read transaction
// picks up the payload the remote participant writes via its response fn.
func TestSuccessfulDistributedReadTransaction(t *testing.T) {
	ctx := context.Background()
	payload := "my-payload"

	remote := newTestTxManager()
	remote.SetResponseFn(func(ctx context.Context, tx *Transaction) ([]byte, error) {
		// the remote answers the read by writing its state into the tx payload
		tx.Payload = payload
		return nil, nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)
	// TODO local.SetConsensusFn

	trType := TransactionType("my-read-tx")

	tx, err := local.BeginTransaction(ctx, trType, nil, 0)
	require.Nil(t, err)

	local.CloseReadTransaction(ctx, tx)

	// the remote's payload must have arrived in the local tx
	assert.Equal(t, "my-payload", tx.Payload)
}
459
+
460
// TestSuccessfulDistributedTransactionSetAllowUnready verifies that the
// allow-unready type list is stored on both sides and that a read transaction
// still completes successfully.
func TestSuccessfulDistributedTransactionSetAllowUnready(t *testing.T) {
	ctx := context.Background()
	payload := "my-payload"

	types := []TransactionType{"type0", "type1"}
	remote := newTestTxManagerAllowUnready(types)
	remote.SetResponseFn(func(ctx context.Context, tx *Transaction) ([]byte, error) {
		tx.Payload = payload
		return nil, nil
	})
	local := NewTxManager(&wrapTxManagerAsBroadcaster{remote},
		&fakeTxPersistence{}, remote.logger)
	local.SetAllowUnready(types)

	trType := TransactionType("my-read-tx")

	tx, err := local.BeginTransaction(ctx, trType, nil, 0)
	require.Nil(t, err)

	local.CloseReadTransaction(ctx, tx)

	// both managers must carry the configured allow-unready types
	assert.ElementsMatch(t, types, remote.allowUnready)
	assert.ElementsMatch(t, types, local.allowUnready)
	assert.Equal(t, "my-payload", tx.Payload)
}
485
+
486
// TestTxWithDeadline checks that the deadline attached to a transaction can
// be used to derive a context: an already-passed deadline yields an expired
// context, a future one yields a live context.
func TestTxWithDeadline(t *testing.T) {
	t.Run("expired", func(t *testing.T) {
		payload := "my-payload"
		trType := TransactionType("my-type")

		ctx := context.Background()

		man := newTestTxManager()

		// a 1ns TTL is effectively already expired by the time we check
		tx, err := man.BeginTransaction(ctx, trType, payload, 1*time.Nanosecond)
		require.Nil(t, err)

		ctx, cancel := context.WithDeadline(context.Background(), tx.Deadline)
		defer cancel()

		assert.NotNil(t, ctx.Err())
	})

	t.Run("still valid", func(t *testing.T) {
		payload := "my-payload"
		trType := TransactionType("my-type")

		ctx := context.Background()

		man := newTestTxManager()

		// a generous 10s TTL must still be valid immediately after opening
		tx, err := man.BeginTransaction(ctx, trType, payload, 10*time.Second)
		require.Nil(t, err)

		ctx, cancel := context.WithDeadline(context.Background(), tx.Deadline)
		defer cancel()

		assert.Nil(t, ctx.Err())
	})
}
521
+
522
+ func newTestTxManager() *TxManager {
523
+ logger, _ := test.NewNullLogger()
524
+ m := NewTxManager(&fakeBroadcaster{}, &fakeTxPersistence{}, logger)
525
+ m.StartAcceptIncoming()
526
+ return m
527
+ }
528
+
529
+ func newTestTxManagerWithRemote(remote Remote) *TxManager {
530
+ logger, _ := test.NewNullLogger()
531
+ m := NewTxManager(remote, &fakeTxPersistence{}, logger)
532
+ m.StartAcceptIncoming()
533
+ return m
534
+ }
535
+
536
+ func newTestTxManagerWithRemoteLoggerHook(remote Remote) (*TxManager, *test.Hook) {
537
+ logger, hook := test.NewNullLogger()
538
+ m := NewTxManager(remote, &fakeTxPersistence{}, logger)
539
+ m.StartAcceptIncoming()
540
+ return m, hook
541
+ }
542
+
543
+ func newTestTxManagerAllowUnready(types []TransactionType) *TxManager {
544
+ logger, _ := test.NewNullLogger()
545
+ m := NewTxManager(&fakeBroadcaster{}, &fakeTxPersistence{}, logger)
546
+ m.SetAllowUnready(types)
547
+ m.StartAcceptIncoming()
548
+ return m
549
+ }
550
+
551
// fakeTxPersistence is a no-op Persistence implementation. It does nothing,
// as these tests do not involve crashes, so nothing ever needs to be durable.
type fakeTxPersistence struct{}

// StoreTx pretends to persist the tx and always succeeds.
func (f *fakeTxPersistence) StoreTx(ctx context.Context,
	tx *Transaction,
) error {
	return nil
}

// DeleteTx pretends to remove the tx and always succeeds.
func (f *fakeTxPersistence) DeleteTx(ctx context.Context,
	txID string,
) error {
	return nil
}

// IterateAll never yields any stored transactions.
func (f *fakeTxPersistence) IterateAll(ctx context.Context,
	cb func(tx *Transaction),
) error {
	return nil
}
platform/dbops/binaries/weaviate-src/usecases/cluster/transactions_write.go ADDED
@@ -0,0 +1,660 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package cluster
13
+
14
+ import (
15
+ "context"
16
+ "fmt"
17
+ "slices"
18
+ "sync"
19
+ "time"
20
+
21
+ "github.com/prometheus/client_golang/prometheus"
22
+ enterrors "github.com/weaviate/weaviate/entities/errors"
23
+ "github.com/weaviate/weaviate/usecases/monitoring"
24
+
25
+ "github.com/google/uuid"
26
+ "github.com/pkg/errors"
27
+ "github.com/sirupsen/logrus"
28
+ )
29
+
30
// TransactionType identifies the kind of distributed transaction; consumers
// dispatch their commit/response handling on it.
type TransactionType string

var (
	// ErrConcurrentTransaction is returned when another transaction is
	// already open on this node.
	ErrConcurrentTransaction = errors.New("concurrent transaction")
	// ErrInvalidTransaction is returned when the referenced transaction is
	// unknown (never opened or already cleaned up without expiring).
	ErrInvalidTransaction = errors.New("invalid transaction")
	// ErrExpiredTransaction is returned when the transaction once existed but
	// its TTL has run out.
	ErrExpiredTransaction = errors.New("transaction TTL expired")
	// ErrNotReady is returned while the node is starting up or shutting down.
	ErrNotReady = errors.New("server is not ready: either starting up or shutting down")
)

// Remote abstracts how transactions are fanned out to the other nodes in the
// cluster.
type Remote interface {
	BroadcastTransaction(ctx context.Context, tx *Transaction) error
	BroadcastAbortTransaction(ctx context.Context, tx *Transaction) error
	BroadcastCommitTransaction(ctx context.Context, tx *Transaction) error
}

type (
	// CommitFn applies a write transaction's payload to local state.
	CommitFn func(ctx context.Context, tx *Transaction) error
	// ResponseFn answers a read transaction by writing local state into the
	// tx payload; its byte result is handed back to the incoming-tx caller.
	ResponseFn func(ctx context.Context, tx *Transaction) ([]byte, error)
)
49
+
50
// TxManager coordinates cluster-wide transactions: it opens, broadcasts,
// commits, and aborts them, tracking at most one transaction at a time. The
// embedded mutex guards all mutable fields below.
type TxManager struct {
	sync.Mutex
	logger logrus.FieldLogger

	// at most one transaction is active at any point in time; these fields
	// track it together with its context, start timestamp, and the cleanup
	// closure installed by resetTxExpiry
	currentTransaction        *Transaction
	currentTransactionContext context.Context
	currentTransactionBegin   time.Time
	clearTransaction          func()

	// any time we start working on a commit, we need to add to this WaitGroup.
	// It will block shutdown until the commit has completed to make sure that we
	// can't accidentally shutdown while a tx is committing.
	ongoingCommits sync.WaitGroup

	// when a shutdown signal has been received, we will no longer accept any new
	// tx's or commits
	acceptIncoming bool

	// read transactions that need to run at start can still be served, they have
	// no side-effects on the node that accepts them.
	//
	// If we disallowed them completely, then two unready nodes would be in a
	// deadlock as they each require information from the other(s) who can't
	// answer because they're not ready.
	allowUnready []TransactionType

	remote     Remote
	commitFn   CommitFn
	responseFn ResponseFn

	// keep the ids of expired transactions around. This way, we can return a
	// nicer error message to the user. Instead of just an "invalid transaction"
	// which no longer exists, they will get an explicit error message mentioning
	// the timeout.
	expiredTxIDs []string

	persistence Persistence

	slowLog *txSlowLog
}
90
+
91
+ func newDummyCommitResponseFn() func(ctx context.Context, tx *Transaction) error {
92
+ return func(ctx context.Context, tx *Transaction) error {
93
+ return nil
94
+ }
95
+ }
96
+
97
+ func newDummyResponseFn() func(ctx context.Context, tx *Transaction) ([]byte, error) {
98
+ return func(ctx context.Context, tx *Transaction) ([]byte, error) {
99
+ return nil, nil
100
+ }
101
+ }
102
+
103
// NewTxManager constructs a TxManager with safe no-op defaults for the commit
// and response functions and starts the slow-transaction watcher. The manager
// starts in the not-ready state; call StartAcceptIncoming once the node can
// serve requests.
func NewTxManager(remote Remote, persistence Persistence,
	logger logrus.FieldLogger,
) *TxManager {
	txm := &TxManager{
		remote: remote,

		// by setting dummy fns that do nothing on default it is possible to run
		// the tx manager with only one set of functions. For example, if the
		// specific Tx is only ever used for broadcasting writes, there is no need
		// to set a responseFn. However, if the fn was nil, we'd panic. Thus a
		// dummy function is a reasonable default - and much cleaner than a
		// nil-check on every call.
		commitFn:    newDummyCommitResponseFn(),
		responseFn:  newDummyResponseFn(),
		logger:      logger,
		persistence: persistence,

		// not yet ready to serve incoming requests; StartAcceptIncoming flips
		// this once the node is up
		acceptIncoming: false,
		slowLog:        newTxSlowLog(logger),
	}

	txm.slowLog.StartWatching()
	return txm
}
128
+
129
+ func (c *TxManager) StartAcceptIncoming() {
130
+ c.Lock()
131
+ defer c.Unlock()
132
+
133
+ c.acceptIncoming = true
134
+ }
135
+
136
+ func (c *TxManager) SetAllowUnready(types []TransactionType) {
137
+ c.Lock()
138
+ defer c.Unlock()
139
+
140
+ c.allowUnready = types
141
+ }
142
+
143
+ // HaveDanglingTxs is a way to check if there are any uncommitted transactions
144
+ // in the durable storage. This can be used to make decisions about whether a
145
+ // failed schema check can be temporarily ignored - with the assumption that
146
+ // applying the dangling txs will fix the issue.
147
+ func (c *TxManager) HaveDanglingTxs(ctx context.Context,
148
+ allowedTypes []TransactionType,
149
+ ) (found bool) {
150
+ c.persistence.IterateAll(context.Background(), func(tx *Transaction) {
151
+ if !slices.Contains(allowedTypes, tx.Type) {
152
+ return
153
+ }
154
+ found = true
155
+ })
156
+
157
+ return
158
+ }
159
+
160
+ // TryResumeDanglingTxs loops over the existing transactions and applies them.
161
+ // It only does so if the transaction type is explicitly listed as allowed.
162
+ // This is because - at the time of creating this - we were not sure if all
163
+ // transaction commit functions are idempotent. If one would not be, then
164
+ // reapplying a tx or tx commit could potentially be dangerous, as we don't
165
+ // know if it was already applied prior to the node death.
166
+ //
167
+ // For example, think of a "add property 'foo'" tx, that does nothing but
168
+ // append the property to the schema. If this ran twice, we might now end up
169
+ // with two duplicate properties with the name 'foo' which could in turn create
170
+ // other problems. To make sure all txs are resumable (which is what we want
171
+ // because that's the only way to avoid schema issues), we need to make sure
172
+ // that every single tx is idempotent, then add them to the allow list.
173
+ //
174
+ // One other limitation is that this method currently does nothing to check if
175
+ // a tx was really committed or not. In an ideal world, the node would contact
176
+ // the other nodes and ask. However, this sipmler implementation does not do
177
+ // this check. Instead [HaveDanglingTxs] is used in combination with the schema
178
+ // check. If the schema is not out of sync in the first place, no txs will be
179
+ // applied. This does not cover all edge cases, but it seems to work for now.
180
+ // This should be improved in the future.
181
+ func (c *TxManager) TryResumeDanglingTxs(ctx context.Context,
182
+ allowedTypes []TransactionType,
183
+ ) (applied bool, err error) {
184
+ c.persistence.IterateAll(context.Background(), func(tx *Transaction) {
185
+ if !slices.Contains(allowedTypes, tx.Type) {
186
+ c.logger.WithFields(logrus.Fields{
187
+ "action": "resume_transaction",
188
+ "transaction_id": tx.ID,
189
+ "transaction_type": tx.Type,
190
+ }).Warnf("dangling transaction %q of type %q is not known to be resumable - skipping",
191
+ tx.ID, tx.Type)
192
+
193
+ return
194
+ }
195
+ if err = c.commitFn(ctx, tx); err != nil {
196
+ return
197
+ }
198
+
199
+ applied = true
200
+ c.logger.WithFields(logrus.Fields{
201
+ "action": "resume_transaction",
202
+ "transaction_id": tx.ID,
203
+ "transaction_type": tx.Type,
204
+ }).Infof("successfully resumed dangling transaction %q of type %q", tx.ID, tx.Type)
205
+ })
206
+
207
+ return
208
+ }
209
+
210
// resetTxExpiry installs the expiry machinery for the transaction with the
// given id: it derives a (possibly unlimited) context from the ttl, installs
// the clearTransaction cleanup closure, and spawns a watcher goroutine that
// either tears down on explicit clear or marks the tx expired when the ttl
// context fires. Callers from the incoming path hold the manager lock;
// beginTransaction's call site should be reviewed for the same guarantee.
func (c *TxManager) resetTxExpiry(ttl time.Duration, id string) {
	cancel := func() {}
	ctx := context.Background()
	if ttl == 0 {
		// ttl of zero means the tx never expires; keep a background context
		c.currentTransactionContext = context.Background()
	} else {
		ctx, cancel = context.WithTimeout(ctx, ttl)
		c.logger.WithFields(logrus.Fields{
			"action":   "reset_tx_expiry",
			"duration": ttl,
		}).Debug("context.WithTimeout")
		c.currentTransactionContext = ctx
	}

	// to prevent a goroutine leak for the new routine we're spawning here,
	// register a way to terminate it in case the explicit cancel is called
	// before the context's done channel fires.
	clearCancelListener := make(chan struct{}, 1)

	c.clearTransaction = func() {
		c.currentTransaction = nil
		c.currentTransactionContext = nil
		// replace ourselves with a no-op so a second call is harmless
		c.clearTransaction = func() {}

		clearCancelListener <- struct{}{}
		close(clearCancelListener)
	}

	f := func() {
		ctxDone := ctx.Done()
		select {
		case <-clearCancelListener:
			// tx was cleared explicitly (commit/abort); release the timer
			cancel()
			return
		case <-ctxDone:
			// ttl ran out; record the id so later lookups can report
			// ErrExpiredTransaction instead of ErrInvalidTransaction
			c.Lock()
			defer c.Unlock()
			c.expiredTxIDs = append(c.expiredTxIDs, id)

			if c.currentTransaction == nil {
				// tx is already cleaned up, for example from a successful commit. Nothing to do for us
				return
			}

			if c.currentTransaction.ID != id {
				// tx was already cleaned up, then a new tx was started. Any action from
				// us would be destructive, as we'd accidentally destroy a perfectly valid
				// tx
				return
			}

			c.clearTransaction()
			monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
				"ownership": "n/a",
				"status":    "expire",
			}).Inc()
			took := time.Since(c.currentTransactionBegin)
			monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
				"ownership": "n/a",
				"status":    "expire",
			}).Observe(took.Seconds())
			c.slowLog.Close("expired")
		}
	}
	enterrors.GoWrapper(f, c.logger)
}
276
+
277
+ // expired is a helper to return a more meaningful error message to the user.
278
+ // Instead of just telling the user that an ID does not exist, this tracks that
279
+ // it once existed, but has been cleared because it expired.
280
+ //
281
+ // This method is not thread-safe as the assumption is that it is called from a
282
+ // thread-safe environment where a lock would already be held
283
+ func (c *TxManager) expired(id string) bool {
284
+ for _, expired := range c.expiredTxIDs {
285
+ if expired == id {
286
+ return true
287
+ }
288
+ }
289
+
290
+ return false
291
+ }
292
+
293
// SetCommitFn sets a function that is used in Write Transactions, you can
// read from the transaction payload and use that state to alter your local
// state.
//
// NOTE(review): unlike SetAllowUnready this setter does not take the lock;
// presumably it is meant to be called once during startup before any
// transactions flow - confirm with callers.
func (c *TxManager) SetCommitFn(fn CommitFn) {
	c.commitFn = fn
}
299
+
300
// SetResponseFn sets a function that is used in Read Transactions. The
// function sets the local state (by writing it into the Tx Payload). It can
// then be sent to other nodes. Consensus is not part of the ResponseFn. The
// coordinator - who initiated the Tx - is responsible for coming up with
// consensus. Deciding on Consensus requires insights into business logic, as
// from the TX's perspective payloads are opaque.
//
// NOTE(review): like SetCommitFn this is unsynchronized; call during startup
// only.
func (c *TxManager) SetResponseFn(fn ResponseFn) {
	c.responseFn = fn
}
309
+
310
// BeginTransaction begins a Transaction with the specified type and payload.
// Transactions expire after the specified TTL. For a transaction that does
// not ever expire, pass in a ttl of 0. When choosing TTLs keep in mind that
// clocks might be slightly skewed in the cluster, therefore set your TTL for
// desiredTTL + toleratedClockSkew
//
// Regular transactions cannot be opened if the cluster is not considered
// healthy.
func (c *TxManager) BeginTransaction(ctx context.Context, trType TransactionType,
	payload interface{}, ttl time.Duration,
) (*Transaction, error) {
	// delegates with tolerateNodeFailures=false: all nodes must ACK
	return c.beginTransaction(ctx, trType, payload, ttl, false)
}
323
+
324
// BeginTransactionTolerateNodeFailures begins a Transaction that does not
// require the whole cluster to be healthy. This can be used for example in
// bootstrapping situations when not all nodes are present yet, or in disaster
// recovery situations when a node needs to run a transaction in order to
// re-join a cluster.
func (c *TxManager) BeginTransactionTolerateNodeFailures(ctx context.Context, trType TransactionType,
	payload interface{}, ttl time.Duration,
) (*Transaction, error) {
	// delegates with tolerateNodeFailures=true: dead nodes are tolerated
	return c.beginTransaction(ctx, trType, payload, ttl, true)
}
333
+
334
+ func (c *TxManager) beginTransaction(ctx context.Context, trType TransactionType,
335
+ payload interface{}, ttl time.Duration, tolerateNodeFailures bool,
336
+ ) (*Transaction, error) {
337
+ c.Lock()
338
+
339
+ if c.currentTransaction != nil {
340
+ c.Unlock()
341
+ return nil, ErrConcurrentTransaction
342
+ }
343
+
344
+ tx := &Transaction{
345
+ Type: trType,
346
+ ID: uuid.New().String(),
347
+ Payload: payload,
348
+ TolerateNodeFailures: tolerateNodeFailures,
349
+ }
350
+ if ttl > 0 {
351
+ tx.Deadline = time.Now().Add(ttl)
352
+ } else {
353
+ // UnixTime == 0 represents unlimited
354
+ tx.Deadline = time.UnixMilli(0)
355
+ }
356
+ c.currentTransaction = tx
357
+ c.currentTransactionBegin = time.Now()
358
+ c.slowLog.Start(tx.ID, true, !tolerateNodeFailures)
359
+ c.Unlock()
360
+
361
+ monitoring.GetMetrics().SchemaTxOpened.With(prometheus.Labels{
362
+ "ownership": "coordinator",
363
+ }).Inc()
364
+
365
+ c.resetTxExpiry(ttl, c.currentTransaction.ID)
366
+
367
+ if err := c.remote.BroadcastTransaction(ctx, tx); err != nil {
368
+ // we could not open the transaction on every node, therefore we need to
369
+ // abort it everywhere.
370
+
371
+ if err := c.remote.BroadcastAbortTransaction(ctx, tx); err != nil {
372
+ c.logger.WithFields(logrus.Fields{
373
+ "action": "broadcast_abort_transaction",
374
+ // before https://github.com/weaviate/weaviate/issues/2625 the next
375
+ // line would read
376
+ //
377
+ // "id": c.currentTransaction.ID
378
+ //
379
+ // which had the potential for races. The tx itself is immutable and
380
+ // therefore always thread-safe. However, the association between the tx
381
+ // manager and the current tx is mutable, therefore the
382
+ // c.currentTransaction pointer could be nil (nil pointer panic) or
383
+ // point to another tx (incorrect log).
384
+ "id": tx.ID,
385
+ }).WithError(err).Errorf("broadcast tx abort failed")
386
+ }
387
+
388
+ c.Lock()
389
+ c.clearTransaction()
390
+ monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
391
+ "ownership": "coordinator",
392
+ "status": "abort",
393
+ }).Inc()
394
+ took := time.Since(c.currentTransactionBegin)
395
+ monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
396
+ "ownership": "coordinator",
397
+ "status": "abort",
398
+ }).Observe(took.Seconds())
399
+ c.slowLog.Close("abort_on_open")
400
+ c.Unlock()
401
+
402
+ return nil, errors.Wrap(err, "broadcast open transaction")
403
+ }
404
+
405
+ c.Lock()
406
+ defer c.Unlock()
407
+ c.slowLog.Update("begin_tx_completed")
408
+ return c.currentTransaction, nil
409
+ }
410
+
411
// CommitWriteTransaction commits the given (previously opened) write
// transaction on the coordinator by broadcasting the commit to the cluster.
// Whatever the outcome, the local transaction is cleared afterwards.
func (c *TxManager) CommitWriteTransaction(ctx context.Context,
	tx *Transaction,
) error {
	c.Lock()

	if !c.acceptIncoming {
		c.Unlock()
		return ErrNotReady
	}

	if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID {
		// distinguish "expired" from "never existed" for a nicer error
		expired := c.expired(tx.ID)
		c.Unlock()
		if expired {
			return ErrExpiredTransaction
		}
		return ErrInvalidTransaction
	}

	c.Unlock()
	c.slowLog.Update("commit_started")

	// now that we know we are dealing with a valid transaction: no matter the
	// outcome, after this call, we should not have a local transaction anymore
	defer func() {
		c.Lock()
		// clearTransaction leaves currentTransactionBegin untouched, so the
		// duration observation below still sees the original start time
		c.clearTransaction()
		monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "commit",
		}).Inc()
		took := time.Since(c.currentTransactionBegin)
		monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
			"ownership": "coordinator",
			"status":    "commit",
		}).Observe(took.Seconds())
		c.slowLog.Close("committed")
		c.Unlock()
	}()

	if err := c.remote.BroadcastCommitTransaction(ctx, tx); err != nil {
		// the broadcast failed, but we can't do anything about it. If we would
		// broadcast an "abort" now (as a previous version did) we'd likely run
		// into an inconsistency down the line. Network requests have variable
		// time, so there's a chance some nodes would see the abort before the
		// commit and vice-versa. Given enough nodes, we would end up with an
		// inconsistent state.
		//
		// A failed commit means the node that didn't receive the commit needs to
		// figure out itself how to get back to the correct state (e.g. by
		// recovering from a persisted tx), don't jeopardize all the other nodes as
		// a result!
		c.logger.WithFields(logrus.Fields{
			"action": "broadcast_commit_transaction",
			"id":     tx.ID,
		}).WithError(err).Error("broadcast tx commit failed")
		return errors.Wrap(err, "broadcast commit transaction")
	}

	return nil
}
472
+
473
// IncomingBeginTransaction handles an open-tx broadcast from a coordinator:
// it validates readiness and concurrency, persists the tx, runs the response
// fn, and arms the tx expiry. The returned bytes are the response fn's answer
// for the coordinator.
func (c *TxManager) IncomingBeginTransaction(ctx context.Context,
	tx *Transaction,
) ([]byte, error) {
	c.Lock()
	defer c.Unlock()

	// unready nodes may still serve explicitly whitelisted (read) tx types
	if !c.acceptIncoming && !slices.Contains(c.allowUnready, tx.Type) {
		return nil, ErrNotReady
	}

	if c.currentTransaction != nil && c.currentTransaction.ID != tx.ID {
		return nil, ErrConcurrentTransaction
	}

	// txs of allow-unready types are treated as non-writable in the slow log
	writable := !slices.Contains(c.allowUnready, tx.Type)
	c.slowLog.Start(tx.ID, false, writable)

	if err := c.persistence.StoreTx(ctx, tx); err != nil {
		return nil, fmt.Errorf("make tx durable: %w", err)
	}

	c.currentTransaction = tx
	c.currentTransactionBegin = time.Now()
	data, err := c.responseFn(ctx, tx)
	if err != nil {
		return nil, err
	}

	monitoring.GetMetrics().SchemaTxOpened.With(prometheus.Labels{
		"ownership": "participant",
	}).Inc()

	// a zero UnixMilli deadline marks an unlimited tx; keep ttl at 0 then
	var ttl time.Duration
	if tx.Deadline.UnixMilli() != 0 {
		ttl = time.Until(tx.Deadline)
	}
	c.resetTxExpiry(ttl, tx.ID)

	c.slowLog.Update("incoming_begin_tx_completed")

	return data, nil
}
515
+
516
// IncomingAbortTransaction handles an abort broadcast by the coordinator. If
// the given tx is not the one currently tracked, the call is a silent no-op.
func (c *TxManager) IncomingAbortTransaction(ctx context.Context,
	tx *Transaction,
) {
	c.Lock()
	defer c.Unlock()

	if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID {
		// don't do anything
		return
	}

	// NOTE(review): this resets currentTransaction directly instead of calling
	// c.clearTransaction(), so the expiry watcher spawned in resetTxExpiry is
	// only released when its ttl context fires - confirm this is intended,
	// especially for ttl==0 transactions.
	c.currentTransaction = nil
	monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
		"ownership": "participant",
		"status":    "abort",
	}).Inc()
	took := time.Since(c.currentTransactionBegin)
	monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
		"ownership": "participant",
		"status":    "abort",
	}).Observe(took.Seconds())
	c.slowLog.Close("abort_request_received")

	// remove the durable copy; the tx will never be resumed
	if err := c.persistence.DeleteTx(ctx, tx.ID); err != nil {
		c.logger.WithError(err).Error("abort tx")
	}
}
543
+
544
// IncomingCommitTransaction handles a commit broadcast by the coordinator. It
// validates the tx under lock, applies the commit fn WITHOUT the lock (to
// avoid deadlocking against the schema manager), and always cleans up the
// local tx afterwards. The commit is registered with ongoingCommits so
// Shutdown waits for it.
func (c *TxManager) IncomingCommitTransaction(ctx context.Context,
	tx *Transaction,
) error {
	c.ongoingCommits.Add(1)
	defer c.ongoingCommits.Done()

	// requires locking because it accesses c.currentTransaction
	txCopy, err := c.incomingCommitTxValidate(ctx, tx)
	if err != nil {
		return err
	}

	c.slowLog.Update("commit_request_received")

	// cleanup requires locking because it accesses c.currentTransaction
	defer c.incomingTxCommitCleanup(ctx, tx)

	// commit cannot use locking because of risk of deadlock, see comment inside method
	if err := c.incomingTxCommitApplyCommitFn(ctx, txCopy); err != nil {
		return err
	}

	return nil
}
568
+
569
+ func (c *TxManager) incomingCommitTxValidate(
570
+ ctx context.Context, tx *Transaction,
571
+ ) (*Transaction, error) {
572
+ c.Lock()
573
+ defer c.Unlock()
574
+
575
+ if !c.acceptIncoming {
576
+ return nil, ErrNotReady
577
+ }
578
+
579
+ if c.currentTransaction == nil || c.currentTransaction.ID != tx.ID {
580
+ expired := c.expired(tx.ID)
581
+ if expired {
582
+ return nil, ErrExpiredTransaction
583
+ }
584
+ return nil, ErrInvalidTransaction
585
+ }
586
+
587
+ txCopy := *c.currentTransaction
588
+ return &txCopy, nil
589
+ }
590
+
591
// incomingTxCommitApplyCommitFn applies the commit fn to the (copied)
// transaction deliberately without holding the manager lock.
func (c *TxManager) incomingTxCommitApplyCommitFn(
	ctx context.Context, tx *Transaction,
) error {
	// Important: Do not hold the c.Lock() while applying the commitFn. The
	// c.Lock() is only meant to make access to c.currentTransaction thread-safe.
	// If we would hold it during apply, there is a risk for a deadlock because
	// apply will likely lock the schema Manager. The schema Manager itself
	// however, might be waiting for the TxManager in case of concurrent
	// requests.
	// See https://github.com/weaviate/weaviate/issues/4312 for steps on how to
	// reproduce
	//
	// use transaction from cache, not passed in for two reason: a. protect
	// against the transaction being manipulated after being created, b. allow
	// an "empty" transaction that only contains the id for less network overhead
	// (we don't need to pass the payload around anymore, after it's successfully
	// opened - every node has a copy of the payload now)
	return c.commitFn(ctx, tx)
}
610
+
611
// incomingTxCommitCleanup clears the local transaction after a commit
// attempt (successful or not), records metrics, and removes the durable copy.
func (c *TxManager) incomingTxCommitCleanup(
	ctx context.Context, tx *Transaction,
) {
	c.Lock()
	defer c.Unlock()
	// NOTE(review): like IncomingAbortTransaction this bypasses
	// c.clearTransaction(), leaving the expiry watcher to its ttl context -
	// confirm this is intended.
	c.currentTransaction = nil

	monitoring.GetMetrics().SchemaTxClosed.With(prometheus.Labels{
		"ownership": "participant",
		"status":    "commit",
	}).Inc()
	took := time.Since(c.currentTransactionBegin)
	monitoring.GetMetrics().SchemaTxDuration.With(prometheus.Labels{
		"ownership": "participant",
		"status":    "commit",
	}).Observe(took.Seconds())
	c.slowLog.Close("committed")

	// the tx is finished either way, so its durable copy must go
	if err := c.persistence.DeleteTx(ctx, tx.ID); err != nil {
		c.logger.WithError(err).WithFields(logrus.Fields{
			"action": "incoming_tx_commit_cleanup",
		}).Error("close tx on disk")
	}
}
635
+
636
+ func (c *TxManager) Shutdown() {
637
+ c.Lock()
638
+ c.acceptIncoming = false
639
+ c.Unlock()
640
+
641
+ c.ongoingCommits.Wait()
642
+ }
643
+
644
// Transaction is the unit of cluster-wide coordination: an identified, typed,
// opaque payload with an optional deadline.
type Transaction struct {
	ID      string
	Type    TransactionType
	Payload interface{}
	// Deadline is the absolute expiry time; a UnixMilli of 0 means the tx
	// never expires.
	Deadline time.Time

	// If TolerateNodeFailures is false (the default) a transaction cannot be
	// opened or committed if a node is confirmed dead. If a node is only
	// suspected dead, the TxManager will try, but abort unless all nodes ACK.
	TolerateNodeFailures bool
}

// Persistence makes transactions durable so dangling txs can be detected and
// resumed after a crash.
type Persistence interface {
	StoreTx(ctx context.Context, tx *Transaction) error
	DeleteTx(ctx context.Context, txID string) error
	IterateAll(ctx context.Context, cb func(tx *Transaction)) error
}
platform/dbops/binaries/weaviate-src/usecases/config/authentication.go ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+
17
+ "github.com/weaviate/weaviate/usecases/config/runtime"
18
+ )
19
+
20
// Authentication configuration: one section per supported auth scheme.
type Authentication struct {
	OIDC            OIDC            `json:"oidc" yaml:"oidc"`
	AnonymousAccess AnonymousAccess `json:"anonymous_access" yaml:"anonymous_access"`
	APIKey          StaticAPIKey    // don't change name to not break yaml files
	DBUsers         DbUsers         `json:"db_users" yaml:"db_users"`
}

// DefaultAuthentication is the default authentication scheme when no
// authentication is provided: anonymous access only.
var DefaultAuthentication = Authentication{
	AnonymousAccess: AnonymousAccess{
		Enabled: true,
	},
}
34
+
35
+ // Validate the Authentication configuration. This only validates at a general
36
+ // level. Validation specific to the individual auth methods should happen
37
+ // inside their respective packages
38
+ func (a Authentication) Validate() error {
39
+ if !a.AnyAuthMethodSelected() {
40
+ return fmt.Errorf("no authentication scheme configured, you must select at least one")
41
+ }
42
+
43
+ return nil
44
+ }
45
+
46
+ func (a Authentication) AnyAuthMethodSelected() bool {
47
+ return a.AnonymousAccess.Enabled || a.OIDC.Enabled || a.APIKey.Enabled || a.DBUsers.Enabled
48
+ }
49
+
50
+ func (a Authentication) AnyApiKeyAvailable() bool {
51
+ return a.APIKey.Enabled || a.DBUsers.Enabled
52
+ }
53
+
54
// AnonymousAccess considers users without any auth information as
// authenticated as "anonymous" rather than denying their request immediately.
// Note that enabling anonymous access ONLY affects Authentication, not
// Authorization.
type AnonymousAccess struct {
	Enabled bool `json:"enabled" yaml:"enabled"`
}

// OIDC configures the OIDC middleware. All fields except Enabled are wrapped
// in runtime.DynamicValue — presumably hot-reloadable at runtime; confirm in
// the runtime package.
type OIDC struct {
	Enabled           bool                            `json:"enabled" yaml:"enabled"`
	Issuer            *runtime.DynamicValue[string]   `json:"issuer" yaml:"issuer"`
	ClientID          *runtime.DynamicValue[string]   `json:"client_id" yaml:"client_id"`
	SkipClientIDCheck *runtime.DynamicValue[bool]     `yaml:"skip_client_id_check" json:"skip_client_id_check"`
	UsernameClaim     *runtime.DynamicValue[string]   `yaml:"username_claim" json:"username_claim"`
	GroupsClaim       *runtime.DynamicValue[string]   `yaml:"groups_claim" json:"groups_claim"`
	Scopes            *runtime.DynamicValue[[]string] `yaml:"scopes" json:"scopes"`
	Certificate       *runtime.DynamicValue[string]   `yaml:"certificate" json:"certificate"`
	JWKSUrl           *runtime.DynamicValue[string]   `yaml:"jwks_url" json:"jwks_url"`
}

// StaticAPIKey configures authentication via a fixed list of API keys.
// NOTE(review): how Users pairs with AllowedKeys is not visible here —
// confirm in the apikey package.
type StaticAPIKey struct {
	Enabled     bool     `json:"enabled" yaml:"enabled"`
	Users       []string `json:"users" yaml:"users"`
	AllowedKeys []string `json:"allowed_keys" yaml:"allowed_keys"`
}

// DbUsers toggles authentication via database-managed users.
type DbUsers struct {
	Enabled bool `json:"enabled" yaml:"enabled"`
}
platform/dbops/binaries/weaviate-src/usecases/config/authentication_test.go ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "testing"
17
+
18
+ "github.com/stretchr/testify/assert"
19
+ "github.com/stretchr/testify/require"
20
+ )
21
+
22
+ func TestConfig_Authentication(t *testing.T) {
23
+ t.Run("no auth selected", func(t *testing.T) {
24
+ auth := Authentication{}
25
+ expected := fmt.Errorf("no authentication scheme configured, you must select at least one")
26
+
27
+ err := auth.Validate()
28
+
29
+ assert.Equal(t, expected, err)
30
+ })
31
+
32
+ t.Run("only anonymous selected", func(t *testing.T) {
33
+ auth := Authentication{
34
+ AnonymousAccess: AnonymousAccess{
35
+ Enabled: true,
36
+ },
37
+ }
38
+
39
+ err := auth.Validate()
40
+
41
+ assert.Nil(t, err, "should not error")
42
+ })
43
+
44
+ t.Run("only oidc selected", func(t *testing.T) {
45
+ auth := Authentication{
46
+ OIDC: OIDC{
47
+ Enabled: true,
48
+ },
49
+ }
50
+
51
+ err := auth.Validate()
52
+
53
+ assert.Nil(t, err, "should not error")
54
+ })
55
+
56
+ t.Run("oidc and anonymous enabled together", func(t *testing.T) {
57
+ // this might seem counter-intuitive at first, but this makes a lot of
58
+ // sense when you consider the authorization strategies: for example we
59
+ // could allow reads for everyone, but only explicitly authenticated users
60
+ // may write
61
+ auth := Authentication{
62
+ OIDC: OIDC{
63
+ Enabled: true,
64
+ },
65
+ AnonymousAccess: AnonymousAccess{
66
+ Enabled: true,
67
+ },
68
+ }
69
+
70
+ err := auth.Validate()
71
+
72
+ assert.Nil(t, err, "should not error")
73
+ })
74
+ }
75
+
76
+ func TestDbUserAuth(t *testing.T) {
77
+ tests := []struct {
78
+ name string
79
+ staticEnabled bool
80
+ dbEnabled bool
81
+ expected bool
82
+ }{
83
+ {"none enabled", false, false, false},
84
+ {"both enabled", true, true, true},
85
+ {"only static", true, false, true},
86
+ {"only db", false, true, true},
87
+ }
88
+
89
+ for _, test := range tests {
90
+ t.Run(test.name, func(t *testing.T) {
91
+ auth := Authentication{
92
+ APIKey: StaticAPIKey{Enabled: test.staticEnabled}, DBUsers: DbUsers{Enabled: test.dbEnabled},
93
+ }
94
+
95
+ require.Equal(t, auth.AnyApiKeyAvailable(), test.expected)
96
+ })
97
+ }
98
+ }
platform/dbops/binaries/weaviate-src/usecases/config/authorization.go ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+
17
+ "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist"
18
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
19
+ )
20
+
21
// Authorization configuration. AdminList and Rbac are mutually exclusive
// (enforced by Validate below).
type Authorization struct {
	AdminList adminlist.Config `json:"admin_list" yaml:"admin_list"`
	Rbac      rbacconf.Config  `json:"rbac" yaml:"rbac"`
}
26
+
27
+ // Validate the Authorization configuration. This only validates at a general
28
+ // level. Validation specific to the individual auth methods should happen
29
+ // inside their respective packages
30
+ func (a Authorization) Validate() error {
31
+ if a.AdminList.Enabled && a.Rbac.Enabled {
32
+ return fmt.Errorf("cannot enable adminlist and rbac at the same time")
33
+ }
34
+
35
+ if a.AdminList.Enabled {
36
+ if err := a.AdminList.Validate(); err != nil {
37
+ return fmt.Errorf("authorization adminlist: %w", err)
38
+ }
39
+ }
40
+
41
+ if a.Rbac.Enabled {
42
+ if err := a.Rbac.Validate(); err != nil {
43
+ return fmt.Errorf("authorization rbac: %w", err)
44
+ }
45
+ }
46
+
47
+ return nil
48
+ }
platform/dbops/binaries/weaviate-src/usecases/config/authorization_test.go ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "testing"
16
+
17
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
18
+
19
+ "github.com/stretchr/testify/assert"
20
+ "github.com/weaviate/weaviate/usecases/auth/authorization/adminlist"
21
+ )
22
+
23
+ func Test_Validation(t *testing.T) {
24
+ configs := []struct {
25
+ name string
26
+ config Authorization
27
+ wantErr bool
28
+ }{
29
+ {
30
+ name: "Only adminlist",
31
+ config: Authorization{AdminList: adminlist.Config{Enabled: true}},
32
+ wantErr: false,
33
+ },
34
+ {
35
+ name: "Only rbac",
36
+ config: Authorization{Rbac: rbacconf.Config{Enabled: true, RootUsers: []string{"1"}}},
37
+ wantErr: false,
38
+ },
39
+ {
40
+ name: "Only adminlist - wrong config",
41
+ config: Authorization{AdminList: adminlist.Config{Enabled: true, Users: []string{"1"}, ReadOnlyUsers: []string{"1"}}},
42
+ wantErr: true,
43
+ },
44
+ {
45
+ name: "both adminlist and rbac",
46
+ config: Authorization{
47
+ AdminList: adminlist.Config{Enabled: true},
48
+ Rbac: rbacconf.Config{Enabled: true, RootUsers: []string{"1"}},
49
+ },
50
+ wantErr: true,
51
+ },
52
+ }
53
+
54
+ for _, tt := range configs {
55
+ t.Run(tt.name, func(t *testing.T) {
56
+ err := tt.config.Validate()
57
+ if tt.wantErr {
58
+ assert.Error(t, err)
59
+ } else {
60
+ assert.NoError(t, err)
61
+ }
62
+ })
63
+ }
64
+ }
platform/dbops/binaries/weaviate-src/usecases/config/auto_schema_test.go ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "testing"
17
+
18
+ "github.com/stretchr/testify/assert"
19
+ "github.com/weaviate/weaviate/entities/schema"
20
+ )
21
+
22
// TestConfig_AutoSchema covers AutoSchema.Validate: one invalid value per
// default-type field, plus one fully valid configuration.
//
// NOTE(review): the expected error strings below are missing the closing
// single quote (e.g. "'number"); they deliberately match the exact strings
// produced by AutoSchema.Validate, so any fix must update both in lockstep.
func TestConfig_AutoSchema(t *testing.T) {
	t.Run("invalid DefaultNumber", func(t *testing.T) {
		auth := AutoSchema{
			DefaultNumber: "float",
			DefaultString: schema.DataTypeText.String(),
			DefaultDate:   "date",
		}
		expected := fmt.Errorf("autoSchema.defaultNumber must be either 'int' or 'number")
		err := auth.Validate()
		assert.Equal(t, expected, err)
	})

	t.Run("invalid DefaultString", func(t *testing.T) {
		auth := AutoSchema{
			DefaultNumber: "int",
			DefaultString: "body",
			DefaultDate:   "date",
		}
		expected := fmt.Errorf("autoSchema.defaultString must be either 'string' or 'text")
		err := auth.Validate()
		assert.Equal(t, expected, err)
	})

	t.Run("invalid DefaultDate", func(t *testing.T) {
		auth := AutoSchema{
			DefaultNumber: "int",
			DefaultString: schema.DataTypeText.String(),
			DefaultDate:   "int",
		}
		expected := fmt.Errorf("autoSchema.defaultDate must be either 'date' or 'string' or 'text")
		err := auth.Validate()
		assert.Equal(t, expected, err)
	})

	t.Run("all valid AutoSchema configurations", func(t *testing.T) {
		auth := AutoSchema{
			DefaultNumber: "int",
			DefaultString: schema.DataTypeText.String(),
			DefaultDate:   "date",
		}
		err := auth.Validate()
		assert.Nil(t, err, "should not error")
	})
}
platform/dbops/binaries/weaviate-src/usecases/config/config_handler.go ADDED
@@ -0,0 +1,752 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "encoding/json"
16
+ "fmt"
17
+ "math"
18
+ "os"
19
+ "regexp"
20
+ "strings"
21
+ "time"
22
+
23
+ "github.com/go-openapi/swag"
24
+ "github.com/pkg/errors"
25
+ "github.com/sirupsen/logrus"
26
+ "gopkg.in/yaml.v3"
27
+
28
+ "github.com/weaviate/weaviate/deprecations"
29
+ entcfg "github.com/weaviate/weaviate/entities/config"
30
+ "github.com/weaviate/weaviate/entities/replication"
31
+ "github.com/weaviate/weaviate/entities/schema"
32
+ entsentry "github.com/weaviate/weaviate/entities/sentry"
33
+ "github.com/weaviate/weaviate/entities/vectorindex/common"
34
+ "github.com/weaviate/weaviate/usecases/cluster"
35
+ "github.com/weaviate/weaviate/usecases/config/runtime"
36
+ usagetypes "github.com/weaviate/weaviate/usecases/modulecomponents/usage/types"
37
+ "github.com/weaviate/weaviate/usecases/monitoring"
38
+ )
39
+
40
// ServerVersion is deprecated. Use `build.Version`. It's there for backward compatibility.
// ServerVersion is set when the misc handlers are setup.
// When misc handlers are setup, the entire swagger spec
// is already being parsed for the server version. This is
// a good time for us to set ServerVersion, so that the
// spec only needs to be parsed once.
var ServerVersion string

// DefaultConfigFile is the default file when no config file is provided
const DefaultConfigFile string = "./weaviate.conf.json"

// DefaultCleanupIntervalSeconds can be overwritten on a per-class basis
const DefaultCleanupIntervalSeconds = int64(60)

const (
	// These BM25 tuning params can be overwritten on a per-class basis
	DefaultBM25k1 = float32(1.2)
	DefaultBM25b  = float32(0.75)
)

// DefaultUsingBlockMaxWAND is true when USE_INVERTED_SEARCHABLE is unset or
// set to a truthy value, i.e. it only becomes false on an explicit opt-out.
var DefaultUsingBlockMaxWAND = os.Getenv("USE_INVERTED_SEARCHABLE") == "" || entcfg.Enabled(os.Getenv("USE_INVERTED_SEARCHABLE"))

const (
	DefaultMaxImportGoroutinesFactor = float64(1.5)

	// Disk/memory pressure thresholds, expressed as a percent of capacity.
	DefaultDiskUseWarningPercentage  = uint64(80)
	DefaultDiskUseReadonlyPercentage = uint64(90)
	DefaultMemUseWarningPercentage   = uint64(80)
	// TODO: off by default for now, to make sure
	// the measurement is reliable. once
	// confirmed, we can set this to 90
	DefaultMemUseReadonlyPercentage = uint64(0)
)
73
+
74
// Flags are input options. Each field is exposed as a command-line option
// via its `long` struct tag (go-flags style parsing — confirm in the caller).
type Flags struct {
	ConfigFile string `long:"config-file" description:"path to config file (default: ./weaviate.conf.json)"`

	// Raft cluster-formation and tuning options.
	RaftPort               int      `long:"raft-port" description:"the port used by Raft for inter-node communication"`
	RaftInternalRPCPort    int      `long:"raft-internal-rpc-port" description:"the port used for internal RPCs within the cluster"`
	RaftRPCMessageMaxSize  int      `long:"raft-rpc-message-max-size" description:"maximum internal raft grpc message size in bytes, defaults to 1073741824"`
	RaftJoin               []string `long:"raft-join" description:"a comma-separated list of server addresses to join on startup. Each element needs to be in the form NODE_NAME[:NODE_PORT]. If NODE_PORT is not present, raft-internal-rpc-port default value will be used instead"`
	RaftBootstrapTimeout   int      `long:"raft-bootstrap-timeout" description:"the duration for which the raft bootstrap procedure will wait for each node in raft-join to be reachable"`
	RaftBootstrapExpect    int      `long:"raft-bootstrap-expect" description:"specifies the number of server nodes to wait for before bootstrapping the cluster"`
	RaftHeartbeatTimeout   int      `long:"raft-heartbeat-timeout" description:"raft heartbeat timeout"`
	RaftElectionTimeout    int      `long:"raft-election-timeout" description:"raft election timeout"`
	RaftSnapshotThreshold  int      `long:"raft-snap-threshold" description:"number of outstanding log entries before performing a snapshot"`
	RaftSnapshotInterval   int      `long:"raft-snap-interval" description:"controls how often raft checks if it should perform a snapshot"`
	RaftMetadataOnlyVoters bool     `long:"raft-metadata-only-voters" description:"configures the voters to store metadata exclusively, without storing any other data"`

	// Runtime-overrides file options (see RuntimeOverrides below).
	RuntimeOverridesEnabled      bool          `long:"runtime-overrides.enabled" description:"enable runtime overrides config"`
	RuntimeOverridesPath         string        `long:"runtime-overrides.path" description:"path to runtime overrides config"`
	RuntimeOverridesLoadInterval time.Duration `long:"runtime-overrides.load-interval" description:"load interval for runtime overrides config"`
}
94
+
95
// SchemaHandlerConfig holds schema-level limits.
type SchemaHandlerConfig struct {
	// MaximumAllowedCollectionsCount caps how many collections may exist;
	// runtime-dynamic value.
	MaximumAllowedCollectionsCount *runtime.DynamicValue[int] `json:"maximum_allowed_collections_count" yaml:"maximum_allowed_collections_count"`
}

// RuntimeOverrides configures the optional runtime-overrides config file and
// how often it is reloaded.
// NOTE(review): Enabled has no yaml tag while the other fields do — confirm
// whether yaml parsing relies on the default key here.
type RuntimeOverrides struct {
	Enabled      bool          `json:"enabled"`
	Path         string        `json:"path" yaml:"path"`
	LoadInterval time.Duration `json:"load_interval" yaml:"load_interval"`
}
104
+
105
// Config outline of the config file.
type Config struct {
	Name                           string         `json:"name" yaml:"name"`
	Debug                          bool           `json:"debug" yaml:"debug"`
	QueryDefaults                  QueryDefaults  `json:"query_defaults" yaml:"query_defaults"`
	QueryMaximumResults            int64          `json:"query_maximum_results" yaml:"query_maximum_results"`
	QueryHybridMaximumResults      int64          `json:"query_hybrid_maximum_results" yaml:"query_hybrid_maximum_results"`
	QueryNestedCrossReferenceLimit int64          `json:"query_nested_cross_reference_limit" yaml:"query_nested_cross_reference_limit"`
	QueryCrossReferenceDepthLimit  int            `json:"query_cross_reference_depth_limit" yaml:"query_cross_reference_depth_limit"`
	Contextionary                  Contextionary  `json:"contextionary" yaml:"contextionary"`
	Authentication                 Authentication `json:"authentication" yaml:"authentication"`
	Authorization                  Authorization  `json:"authorization" yaml:"authorization"`
	Origin                         string         `json:"origin" yaml:"origin"`
	Persistence                    Persistence    `json:"persistence" yaml:"persistence"`
	DefaultVectorizerModule        string         `json:"default_vectorizer_module" yaml:"default_vectorizer_module"`
	DefaultVectorDistanceMetric    string         `json:"default_vector_distance_metric" yaml:"default_vector_distance_metric"`
	EnableModules                  string         `json:"enable_modules" yaml:"enable_modules"`
	// NOTE(review): the field name says "Enable" but the serialized key says
	// "disabled" — confirm which polarity config files and callers expect.
	EnableApiBasedModules               bool                     `json:"api_based_modules_disabled" yaml:"api_based_modules_disabled"`
	ModulesPath                         string                   `json:"modules_path" yaml:"modules_path"`
	ModuleHttpClientTimeout             time.Duration            `json:"modules_client_timeout" yaml:"modules_client_timeout"`
	AutoSchema                          AutoSchema               `json:"auto_schema" yaml:"auto_schema"`
	Cluster                             cluster.Config           `json:"cluster" yaml:"cluster"`
	Replication                         replication.GlobalConfig `json:"replication" yaml:"replication"`
	Monitoring                          monitoring.Config        `json:"monitoring" yaml:"monitoring"`
	GRPC                                GRPC                     `json:"grpc" yaml:"grpc"`
	Profiling                           Profiling                `json:"profiling" yaml:"profiling"`
	ResourceUsage                       ResourceUsage            `json:"resource_usage" yaml:"resource_usage"`
	MaxImportGoroutinesFactor           float64                  `json:"max_import_goroutine_factor" yaml:"max_import_goroutine_factor"`
	MaximumConcurrentGetRequests        int                      `json:"maximum_concurrent_get_requests" yaml:"maximum_concurrent_get_requests"`
	MaximumConcurrentShardLoads         int                      `json:"maximum_concurrent_shard_loads" yaml:"maximum_concurrent_shard_loads"`
	TrackVectorDimensions               bool                     `json:"track_vector_dimensions" yaml:"track_vector_dimensions"`
	TrackVectorDimensionsInterval       time.Duration            `json:"track_vector_dimensions_interval" yaml:"track_vector_dimensions_interval"`
	ReindexVectorDimensionsAtStartup    bool                     `json:"reindex_vector_dimensions_at_startup" yaml:"reindex_vector_dimensions_at_startup"`
	DisableLazyLoadShards               bool                     `json:"disable_lazy_load_shards" yaml:"disable_lazy_load_shards"`
	ForceFullReplicasSearch             bool                     `json:"force_full_replicas_search" yaml:"force_full_replicas_search"`
	TransferInactivityTimeout           time.Duration            `json:"transfer_inactivity_timeout" yaml:"transfer_inactivity_timeout"`
	RecountPropertiesAtStartup          bool                     `json:"recount_properties_at_startup" yaml:"recount_properties_at_startup"`
	ReindexSetToRoaringsetAtStartup     bool                     `json:"reindex_set_to_roaringset_at_startup" yaml:"reindex_set_to_roaringset_at_startup"`
	ReindexerGoroutinesFactor           float64                  `json:"reindexer_goroutines_factor" yaml:"reindexer_goroutines_factor"`
	ReindexMapToBlockmaxAtStartup       bool                     `json:"reindex_map_to_blockmax_at_startup" yaml:"reindex_map_to_blockmax_at_startup"`
	ReindexMapToBlockmaxConfig          MapToBlockamaxConfig     `json:"reindex_map_to_blockmax_config" yaml:"reindex_map_to_blockmax_config"`
	IndexMissingTextFilterableAtStartup bool                     `json:"index_missing_text_filterable_at_startup" yaml:"index_missing_text_filterable_at_startup"`
	DisableGraphQL                      bool                     `json:"disable_graphql" yaml:"disable_graphql"`
	AvoidMmap                           bool                     `json:"avoid_mmap" yaml:"avoid_mmap"`
	CORS                                CORS                     `json:"cors" yaml:"cors"`
	DisableTelemetry                    bool                     `json:"disable_telemetry" yaml:"disable_telemetry"`
	HNSWStartupWaitForVectorCache       bool                     `json:"hnsw_startup_wait_for_vector_cache" yaml:"hnsw_startup_wait_for_vector_cache"`
	HNSWVisitedListPoolMaxSize          int                      `json:"hnsw_visited_list_pool_max_size" yaml:"hnsw_visited_list_pool_max_size"`
	HNSWFlatSearchConcurrency           int                      `json:"hnsw_flat_search_concurrency" yaml:"hnsw_flat_search_concurrency"`
	HNSWAcornFilterRatio                float64                  `json:"hnsw_acorn_filter_ratio" yaml:"hnsw_acorn_filter_ratio"`
	Sentry                              *entsentry.ConfigOpts    `json:"sentry" yaml:"sentry"`
	MetadataServer                      MetadataServer           `json:"metadata_server" yaml:"metadata_server"`
	SchemaHandlerConfig                 SchemaHandlerConfig      `json:"schema" yaml:"schema"`
	DistributedTasks                    DistributedTasksConfig   `json:"distributed_tasks" yaml:"distributed_tasks"`
	ReplicationEngineMaxWorkers         int                      `json:"replication_engine_max_workers" yaml:"replication_engine_max_workers"`
	ReplicationEngineFileCopyWorkers    int                      `json:"replication_engine_file_copy_workers" yaml:"replication_engine_file_copy_workers"`
	// Raft Specific configuration
	// TODO-RAFT: Do we want to be able to specify these with config file as well ?
	Raft Raft

	// map[className][]propertyName
	ReindexIndexesAtStartup map[string][]string `json:"reindex_indexes_at_startup" yaml:"reindex_indexes_at_startup"`

	RuntimeOverrides RuntimeOverrides `json:"runtime_overrides" yaml:"runtime_overrides"`

	ReplicaMovementDisabled bool `json:"replica_movement_disabled" yaml:"replica_movement_disabled"`
	// NOTE(review): the json/yaml keys below are ALL-CAPS, unlike every other
	// field — confirm whether this is intentional (env-var style) before
	// normalizing.
	ReplicaMovementMinimumAsyncWait *runtime.DynamicValue[time.Duration] `json:"REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT" yaml:"REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT"`

	// TenantActivityReadLogLevel is 'debug' by default as every single READ
	// interaction with a tenant leads to a log line. However, this may
	// temporarily be desired, e.g. for analysis or debugging purposes. In this
	// case the log level can be elevated, e.g. to 'info'. This is overall less
	// noisy than changing the global log level, but still allows to see all
	// tenant read activity.
	TenantActivityReadLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_read_log_level" yaml:"tenant_activity_read_log_level"`
	// TenantActivityWriteLogLevel is 'debug' by default as every single WRITE
	// interaction with a tenant leads to a log line. However, this may
	// temporarily be desired, e.g. for analysis or debugging purposes. In this
	// case the log level can be elevated, e.g. to 'info'. This is overall less
	// noisy than changing the global log level, but still allows to see all
	// tenant write activity.
	TenantActivityWriteLogLevel *runtime.DynamicValue[string] `json:"tenant_activity_write_log_level" yaml:"tenant_activity_write_log_level"`

	// RevectorizeCheck is an optimization where Weaviate checks if a vector can
	// be reused from a previous version of the object, for example because the
	// only change was an update of a property that is excluded from
	// vectorization. This check is on by default (backward-compatibility).
	//
	// However, this check comes at a cost, it means that every single insert
	// will turn into a read-before-write pattern, even if the inserted object is
	// new. That is because the logic first needs to check if the object even
	// exists. In cases where write throughput matters and the overwhelming
	// majority of inserts are new, unique objects, it might be advisable to turn
	// this feature off using the provided flag.
	RevectorizeCheckDisabled *runtime.DynamicValue[bool] `json:"revectorize_check_disabled" yaml:"revectorize_check_disabled"`

	QuerySlowLogEnabled   *runtime.DynamicValue[bool]          `json:"query_slow_log_enabled" yaml:"query_slow_log_enabled"`
	QuerySlowLogThreshold *runtime.DynamicValue[time.Duration] `json:"query_slow_log_threshold" yaml:"query_slow_log_threshold"`

	// New classes will be created with the default quantization
	DefaultQuantization *runtime.DynamicValue[string] `json:"default_quantization" yaml:"default_quantization"`

	QueryBitmapBufsMaxMemory  int `json:"query_bitmap_bufs_max_memory" yaml:"query_bitmap_bufs_max_memory"`
	QueryBitmapBufsMaxBufSize int `json:"query_bitmap_bufs_max_buf_size" yaml:"query_bitmap_bufs_max_buf_size"`

	// InvertedSorterDisabled forces the "objects bucket" strategy and does
	// not consider inverted sorting, even when the query planner thinks this
	// is the better option.
	//
	// Most users should never set this flag, it exists for two reasons:
	// - For benchmarking reasons, this flag can be used to evaluate the
	// (positive) impact of the inverted sorter.
	// - As a safety net to revert to the old behavior in case there is a bug
	// in the inverted indexer despite the very extensive testing.
	//
	// This flag may be removed in the future.
	InvertedSorterDisabled *runtime.DynamicValue[bool] `json:"inverted_sorter_disabled" yaml:"inverted_sorter_disabled"`

	// Usage configuration for the usage module
	Usage usagetypes.UsageConfig `json:"usage" yaml:"usage"`

	// The minimum timeout for the server to wait before it returns an error
	MinimumInternalTimeout time.Duration `json:"minimum_internal_timeout" yaml:"minimum_internal_timeout"`
}
229
+
230
// MapToBlockamaxConfig tunes the startup reindexing of map buckets to the
// BlockMax format (used when ReindexMapToBlockmaxAtStartup is set).
//
// NOTE(review): the type name contains a typo ("Blockamax" vs "Blockmax");
// renaming would break external references, so it is only flagged here.
type MapToBlockamaxConfig struct {
	SwapBuckets                bool                     `json:"swap_buckets" yaml:"swap_buckets"`
	UnswapBuckets              bool                     `json:"unswap_buckets" yaml:"unswap_buckets"`
	TidyBuckets                bool                     `json:"tidy_buckets" yaml:"tidy_buckets"`
	ReloadShards               bool                     `json:"reload_shards" yaml:"reload_shards"`
	Rollback                   bool                     `json:"rollback" yaml:"rollback"`
	ConditionalStart           bool                     `json:"conditional_start" yaml:"conditional_start"`
	ProcessingDurationSeconds  int                      `json:"processing_duration_seconds" yaml:"processing_duration_seconds"`
	PauseDurationSeconds       int                      `json:"pause_duration_seconds" yaml:"pause_duration_seconds"`
	PerObjectDelayMilliseconds int                      `json:"per_object_delay_milliseconds" yaml:"per_object_delay_milliseconds"`
	Selected                   []CollectionPropsTenants `json:"selected" yaml:"selected"`
}

// CollectionPropsTenants selects a collection and, optionally, specific
// properties and tenants within it.
type CollectionPropsTenants struct {
	Collection string   `json:"collection" yaml:"collection"`
	Props      []string `json:"props" yaml:"props"`
	Tenants    []string `json:"tenants" yaml:"tenants"`
}
248
+
249
+ // Validate the configuration
250
+ func (c *Config) Validate() error {
251
+ if err := c.Authentication.Validate(); err != nil {
252
+ return configErr(err)
253
+ }
254
+
255
+ if err := c.Authorization.Validate(); err != nil {
256
+ return configErr(err)
257
+ }
258
+
259
+ if c.Authentication.AnonymousAccess.Enabled && c.Authorization.Rbac.Enabled {
260
+ return fmt.Errorf("cannot enable anonymous access and rbac authorization")
261
+ }
262
+
263
+ if err := c.Persistence.Validate(); err != nil {
264
+ return configErr(err)
265
+ }
266
+
267
+ if err := c.AutoSchema.Validate(); err != nil {
268
+ return configErr(err)
269
+ }
270
+
271
+ if err := c.ResourceUsage.Validate(); err != nil {
272
+ return configErr(err)
273
+ }
274
+
275
+ if err := c.Raft.Validate(); err != nil {
276
+ return configErr(err)
277
+ }
278
+
279
+ return nil
280
+ }
281
+
282
+ // ValidateModules validates the non-nested parameters. Nested objects must provide their own
283
+ // validation methods
284
+ func (c *Config) ValidateModules(modProv moduleProvider) error {
285
+ if err := c.validateDefaultVectorizerModule(modProv); err != nil {
286
+ return errors.Wrap(err, "default vectorizer module")
287
+ }
288
+
289
+ if err := c.validateDefaultVectorDistanceMetric(); err != nil {
290
+ return errors.Wrap(err, "default vector distance metric")
291
+ }
292
+
293
+ return nil
294
+ }
295
+
296
+ func (c *Config) validateDefaultVectorizerModule(modProv moduleProvider) error {
297
+ if c.DefaultVectorizerModule == VectorizerModuleNone {
298
+ return nil
299
+ }
300
+
301
+ return modProv.ValidateVectorizer(c.DefaultVectorizerModule)
302
+ }
303
+
304
// moduleProvider is the minimal module-registry surface this package needs
// for validation (consumer-side interface).
type moduleProvider interface {
	// ValidateVectorizer returns an error if the named module is not a
	// usable vectorizer.
	ValidateVectorizer(moduleName string) error
}
307
+
308
+ func (c *Config) validateDefaultVectorDistanceMetric() error {
309
+ switch c.DefaultVectorDistanceMetric {
310
+ case "", common.DistanceCosine, common.DistanceDot, common.DistanceL2Squared, common.DistanceManhattan, common.DistanceHamming:
311
+ return nil
312
+ default:
313
+ return fmt.Errorf("must be one of [\"cosine\", \"dot\", \"l2-squared\", \"manhattan\",\"hamming\"]")
314
+ }
315
+ }
316
+
317
// AutoSchema configures automatic schema inference: whether it is enabled
// (runtime-dynamic) and which data type each inferred kind defaults to.
type AutoSchema struct {
	Enabled       *runtime.DynamicValue[bool] `json:"enabled" yaml:"enabled"`
	DefaultString string                      `json:"defaultString" yaml:"defaultString"`
	DefaultNumber string                      `json:"defaultNumber" yaml:"defaultNumber"`
	DefaultDate   string                      `json:"defaultDate" yaml:"defaultDate"`
}
323
+
324
// Validate checks that the auto-schema default types are within the allowed
// sets: 'int'/'number', 'string'/'text', and 'date'/'string'/'text'.
//
// NOTE(review): all three error strings below are missing the closing single
// quote (e.g. "'number"). The unit tests assert these exact strings, so the
// typo can only be fixed together with the tests.
func (a AutoSchema) Validate() error {
	if a.DefaultNumber != "int" && a.DefaultNumber != "number" {
		return fmt.Errorf("autoSchema.defaultNumber must be either 'int' or 'number")
	}
	if a.DefaultString != schema.DataTypeText.String() &&
		a.DefaultString != schema.DataTypeString.String() {
		return fmt.Errorf("autoSchema.defaultString must be either 'string' or 'text")
	}
	if a.DefaultDate != "date" &&
		a.DefaultDate != schema.DataTypeText.String() &&
		a.DefaultDate != schema.DataTypeString.String() {
		return fmt.Errorf("autoSchema.defaultDate must be either 'date' or 'string' or 'text")
	}

	return nil
}
340
+
341
// QueryDefaults for optional parameters
type QueryDefaults struct {
	Limit        int64 `json:"limit" yaml:"limit"`
	LimitGraphQL int64 `json:"limitGraphQL" yaml:"limitGraphQL"`
}

// Defaults applied when the request does not specify a limit.
const (
	// DefaultQueryDefaultsLimit is the default query limit when no limit is provided
	DefaultQueryDefaultsLimit        int64 = 10
	DefaultQueryDefaultsLimitGraphQL int64 = 100
)

// Contextionary holds the connection settings for the contextionary service.
type Contextionary struct {
	URL string `json:"url" yaml:"url"`
}
356
+
357
+ // Support independent TLS credentials for gRPC
358
+ type GRPC struct {
359
+ Port int `json:"port" yaml:"port"`
360
+ CertFile string `json:"certFile" yaml:"certFile"`
361
+ KeyFile string `json:"keyFile" yaml:"keyFile"`
362
+ MaxMsgSize int `json:"maxMsgSize" yaml:"maxMsgSize"`
363
+ }
364
+
365
+ type Profiling struct {
366
+ BlockProfileRate int `json:"blockProfileRate" yaml:"blockProfileRate"`
367
+ MutexProfileFraction int `json:"mutexProfileFraction" yaml:"mutexProfileFraction"`
368
+ Disabled bool `json:"disabled" yaml:"disabled"`
369
+ Port int `json:"port" yaml:"port"`
370
+ }
371
+
372
+ type DistributedTasksConfig struct {
373
+ Enabled bool `json:"enabled" yaml:"enabled"`
374
+ CompletedTaskTTL time.Duration `json:"completedTaskTTL" yaml:"completedTaskTTL"`
375
+ SchedulerTickInterval time.Duration `json:"schedulerTickInterval" yaml:"schedulerTickInterval"`
376
+ }
377
+
378
// Persistence configures on-disk storage: data location, LSM memtable and
// segment behavior, and HNSW commit-log/snapshot handling.
type Persistence struct {
	DataPath                                     string `json:"dataPath" yaml:"dataPath"`
	MemtablesFlushDirtyAfter                     int    `json:"flushDirtyMemtablesAfter" yaml:"flushDirtyMemtablesAfter"`
	MemtablesMaxSizeMB                           int    `json:"memtablesMaxSizeMB" yaml:"memtablesMaxSizeMB"`
	MemtablesMinActiveDurationSeconds            int    `json:"memtablesMinActiveDurationSeconds" yaml:"memtablesMinActiveDurationSeconds"`
	MemtablesMaxActiveDurationSeconds            int    `json:"memtablesMaxActiveDurationSeconds" yaml:"memtablesMaxActiveDurationSeconds"`
	LSMMaxSegmentSize                            int64  `json:"lsmMaxSegmentSize" yaml:"lsmMaxSegmentSize"`
	LSMSegmentsCleanupIntervalSeconds            int    `json:"lsmSegmentsCleanupIntervalSeconds" yaml:"lsmSegmentsCleanupIntervalSeconds"`
	LSMSeparateObjectsCompactions                bool   `json:"lsmSeparateObjectsCompactions" yaml:"lsmSeparateObjectsCompactions"`
	LSMEnableSegmentsChecksumValidation          bool   `json:"lsmEnableSegmentsChecksumValidation" yaml:"lsmEnableSegmentsChecksumValidation"`
	LSMCycleManagerRoutinesFactor                int    `json:"lsmCycleManagerRoutinesFactor" yaml:"lsmCycleManagerRoutinesFactor"`
	IndexRangeableInMemory                       bool   `json:"indexRangeableInMemory" yaml:"indexRangeableInMemory"`
	MinMMapSize                                  int64  `json:"minMMapSize" yaml:"minMMapSize"`
	LazySegmentsDisabled                         bool   `json:"lazySegmentsDisabled" yaml:"lazySegmentsDisabled"`
	SegmentInfoIntoFileNameEnabled               bool   `json:"segmentFileInfoEnabled" yaml:"segmentFileInfoEnabled"`
	WriteMetadataFilesEnabled                    bool   `json:"writeMetadataFilesEnabled" yaml:"writeMetadataFilesEnabled"`
	MaxReuseWalSize                              int64  `json:"MaxReuseWalSize" yaml:"MaxReuseWalSize"`
	HNSWMaxLogSize                               int64  `json:"hnswMaxLogSize" yaml:"hnswMaxLogSize"`
	HNSWDisableSnapshots                         bool   `json:"hnswDisableSnapshots" yaml:"hnswDisableSnapshots"`
	HNSWSnapshotIntervalSeconds                  int    `json:"hnswSnapshotIntervalSeconds" yaml:"hnswSnapshotIntervalSeconds"`
	HNSWSnapshotOnStartup                        bool   `json:"hnswSnapshotOnStartup" yaml:"hnswSnapshotOnStartup"`
	HNSWSnapshotMinDeltaCommitlogsNumber         int    `json:"hnswSnapshotMinDeltaCommitlogsNumber" yaml:"hnswSnapshotMinDeltaCommitlogsNumber"`
	HNSWSnapshotMinDeltaCommitlogsSizePercentage int    `json:"hnswSnapshotMinDeltaCommitlogsSizePercentage" yaml:"hnswSnapshotMinDeltaCommitlogsSizePercentage"`
}

// DefaultPersistenceDataPath is the default location for data directory when no location is provided
const DefaultPersistenceDataPath string = "./data"

// DefaultPersistenceLSMMaxSegmentSize is effectively unlimited for backward
// compatibility. TODO: consider changing this in a future release and make
// some noise about it. This is technically a breaking change.
const DefaultPersistenceLSMMaxSegmentSize = math.MaxInt64

// DefaultPersistenceLSMSegmentsCleanupIntervalSeconds = 0 for backward compatibility.
// value = 0 means cleanup is turned off.
const DefaultPersistenceLSMSegmentsCleanupIntervalSeconds = 0

// DefaultPersistenceLSMCycleManagerRoutinesFactor - determines how many goroutines
// are started for cyclemanager (factor * NUMCPU)
const DefaultPersistenceLSMCycleManagerRoutinesFactor = 2

const DefaultPersistenceHNSWMaxLogSize = 500 * 1024 * 1024 // 500MB for backward compatibility

const (
	// minimal interval for new hnws snapshot to be created after last one
	DefaultHNSWSnapshotIntervalSeconds                  = 6 * 3600 // 6h
	DefaultHNSWSnapshotDisabled                         = true
	DefaultHNSWSnapshotOnStartup                        = true
	DefaultHNSWSnapshotMinDeltaCommitlogsNumber         = 1
	DefaultHNSWSnapshotMinDeltaCommitlogsSizePercentage = 5 // 5%
)

const (
	DefaultReindexerGoroutinesFactor = 0.5

	DefaultMapToBlockmaxProcessingDurationSeconds  = 3 * 60
	DefaultMapToBlockmaxPauseDurationSeconds       = 60
	DefaultMapToBlockmaxPerObjectDelayMilliseconds = 0
)

// MetadataServer is experimental.
type MetadataServer struct {
	// When enabled startup will include a "metadata server"
	// for separation of storage/compute Weaviate.
	Enabled                   bool   `json:"enabled" yaml:"enabled"`
	GrpcListenAddress         string `json:"grpc_listen_address" yaml:"grpc_listen_address"`
	DataEventsChannelCapacity int    `json:"data_events_channel_capacity" yaml:"data_events_channel_capacity"`
}

const (
	DefaultMetadataServerGrpcListenAddress         = ":9050"
	DefaultMetadataServerDataEventsChannelCapacity = 100
)

const DefaultHNSWVisitedListPoolSize = -1 // unlimited for backward compatibility

const DefaultHNSWFlatSearchConcurrency = 1 // 1 for backward compatibility

const (
	DefaultPersistenceMinMMapSize     = 8192 // 8kb by default
	DefaultPersistenceMaxReuseWalSize = 4096 // 4kb by default
)
460
+
461
+ func (p Persistence) Validate() error {
462
+ if p.DataPath == "" {
463
+ return fmt.Errorf("persistence.dataPath must be set")
464
+ }
465
+
466
+ return nil
467
+ }
468
+
469
// DiskUse holds the disk usage thresholds (in percent) at which a node warns
// and at which it switches to read-only mode.
type DiskUse struct {
	WarningPercentage  uint64 `json:"warning_percentage" yaml:"warning_percentage"`
	ReadOnlyPercentage uint64 `json:"readonly_percentage" yaml:"readonly_percentage"`
}
473
+
474
+ func (d DiskUse) Validate() error {
475
+ if d.WarningPercentage > 100 {
476
+ return fmt.Errorf("disk_use.read_only_percentage must be between 0 and 100")
477
+ }
478
+
479
+ if d.ReadOnlyPercentage > 100 {
480
+ return fmt.Errorf("disk_use.read_only_percentage must be between 0 and 100")
481
+ }
482
+
483
+ return nil
484
+ }
485
+
486
// MemUse holds the memory usage thresholds (in percent) at which a node warns
// and at which it switches to read-only mode.
type MemUse struct {
	WarningPercentage  uint64 `json:"warning_percentage" yaml:"warning_percentage"`
	ReadOnlyPercentage uint64 `json:"readonly_percentage" yaml:"readonly_percentage"`
}
490
+
491
+ func (m MemUse) Validate() error {
492
+ if m.WarningPercentage > 100 {
493
+ return fmt.Errorf("mem_use.read_only_percentage must be between 0 and 100")
494
+ }
495
+
496
+ if m.ReadOnlyPercentage > 100 {
497
+ return fmt.Errorf("mem_use.read_only_percentage must be between 0 and 100")
498
+ }
499
+
500
+ return nil
501
+ }
502
+
503
+ type ResourceUsage struct {
504
+ DiskUse DiskUse
505
+ MemUse MemUse
506
+ }
507
+
508
+ type CORS struct {
509
+ AllowOrigin string `json:"allow_origin" yaml:"allow_origin"`
510
+ AllowMethods string `json:"allow_methods" yaml:"allow_methods"`
511
+ AllowHeaders string `json:"allow_headers" yaml:"allow_headers"`
512
+ }
513
+
514
+ const (
515
+ DefaultCORSAllowOrigin = "*"
516
+ DefaultCORSAllowMethods = "*"
517
+ DefaultCORSAllowHeaders = "Content-Type, Authorization, Batch, X-Openai-Api-Key, X-Openai-Organization, X-Openai-Baseurl, X-Anyscale-Baseurl, X-Anyscale-Api-Key, X-Cohere-Api-Key, X-Cohere-Baseurl, X-Huggingface-Api-Key, X-Azure-Api-Key, X-Azure-Deployment-Id, X-Azure-Resource-Name, X-Azure-Concurrency, X-Azure-Block-Size, X-Google-Api-Key, X-Google-Vertex-Api-Key, X-Google-Studio-Api-Key, X-Goog-Api-Key, X-Goog-Vertex-Api-Key, X-Goog-Studio-Api-Key, X-Palm-Api-Key, X-Jinaai-Api-Key, X-Aws-Access-Key, X-Aws-Secret-Key, X-Voyageai-Baseurl, X-Voyageai-Api-Key, X-Mistral-Baseurl, X-Mistral-Api-Key, X-Anthropic-Baseurl, X-Anthropic-Api-Key, X-Databricks-Endpoint, X-Databricks-Token, X-Databricks-User-Agent, X-Friendli-Token, X-Friendli-Baseurl, X-Weaviate-Api-Key, X-Weaviate-Cluster-Url, X-Nvidia-Api-Key, X-Nvidia-Baseurl"
518
+ )
519
+
520
+ func (r ResourceUsage) Validate() error {
521
+ if err := r.DiskUse.Validate(); err != nil {
522
+ return err
523
+ }
524
+
525
+ if err := r.MemUse.Validate(); err != nil {
526
+ return err
527
+ }
528
+
529
+ return nil
530
+ }
531
+
532
+ type Raft struct {
533
+ Port int
534
+ InternalRPCPort int
535
+ RPCMessageMaxSize int
536
+ Join []string
537
+
538
+ SnapshotInterval time.Duration
539
+ SnapshotThreshold uint64
540
+ TrailingLogs uint64
541
+
542
+ HeartbeatTimeout time.Duration
543
+ ElectionTimeout time.Duration
544
+ LeaderLeaseTimeout time.Duration
545
+ TimeoutsMultiplier int
546
+ ConsistencyWaitTimeout time.Duration
547
+
548
+ BootstrapTimeout time.Duration
549
+ BootstrapExpect int
550
+ MetadataOnlyVoters bool
551
+
552
+ EnableOneNodeRecovery bool
553
+ ForceOneNodeRecovery bool
554
+ }
555
+
556
+ func (r *Raft) Validate() error {
557
+ if r.Port == 0 {
558
+ return fmt.Errorf("raft.port must be greater than 0")
559
+ }
560
+
561
+ if r.InternalRPCPort == 0 {
562
+ return fmt.Errorf("raft.intra_rpc_port must be greater than 0")
563
+ }
564
+
565
+ uniqueMap := make(map[string]struct{}, len(r.Join))
566
+ updatedJoinList := make([]string, len(r.Join))
567
+ for i, nodeNameAndPort := range r.Join {
568
+ // Check that the format is correct. In case only node name is present we append the default raft port
569
+ nodeNameAndPortSplitted := strings.Split(nodeNameAndPort, ":")
570
+ if len(nodeNameAndPortSplitted) == 0 {
571
+ return fmt.Errorf("raft.join element %s has no node name", nodeNameAndPort)
572
+ } else if len(nodeNameAndPortSplitted) < 2 {
573
+ // If user only specify a node name and no port, use the default raft port
574
+ nodeNameAndPortSplitted = append(nodeNameAndPortSplitted, fmt.Sprintf("%d", DefaultRaftPort))
575
+ } else if len(nodeNameAndPortSplitted) > 2 {
576
+ return fmt.Errorf("raft.join element %s has unexpected amount of element", nodeNameAndPort)
577
+ }
578
+
579
+ // Check that the node name is unique
580
+ nodeName := nodeNameAndPortSplitted[0]
581
+ if _, ok := uniqueMap[nodeName]; ok {
582
+ return fmt.Errorf("raft.join contains the value %s multiple times. Joined nodes must have a unique id", nodeName)
583
+ } else {
584
+ uniqueMap[nodeName] = struct{}{}
585
+ }
586
+
587
+ // TODO-RAFT START
588
+ // Validate host and port
589
+
590
+ updatedJoinList[i] = strings.Join(nodeNameAndPortSplitted, ":")
591
+ }
592
+ r.Join = updatedJoinList
593
+
594
+ if r.BootstrapExpect == 0 {
595
+ return fmt.Errorf("raft.bootstrap_expect must be greater than 0")
596
+ }
597
+
598
+ if r.BootstrapExpect > len(r.Join) {
599
+ return fmt.Errorf("raft.bootstrap.expect must be less than or equal to the length of raft.join")
600
+ }
601
+
602
+ if r.SnapshotInterval <= 0 {
603
+ return fmt.Errorf("raft.bootstrap.snapshot_interval must be more than 0")
604
+ }
605
+
606
+ if r.SnapshotThreshold <= 0 {
607
+ return fmt.Errorf("raft.bootstrap.snapshot_threshold must be more than 0")
608
+ }
609
+
610
+ if r.ConsistencyWaitTimeout <= 0 {
611
+ return fmt.Errorf("raft.bootstrap.consistency_wait_timeout must be more than 0")
612
+ }
613
+
614
+ return nil
615
+ }
616
+
617
+ // GetConfigOptionGroup creates an option group for swagger
618
+ func GetConfigOptionGroup() *swag.CommandLineOptionsGroup {
619
+ commandLineOptionsGroup := swag.CommandLineOptionsGroup{
620
+ ShortDescription: "Connector, raft & MQTT config",
621
+ LongDescription: "",
622
+ Options: &Flags{},
623
+ }
624
+
625
+ return &commandLineOptionsGroup
626
+ }
627
+
628
+ // WeaviateConfig represents the used schema's
629
+ type WeaviateConfig struct {
630
+ Config Config
631
+ Hostname string
632
+ Scheme string
633
+ }
634
+
635
+ // GetHostAddress from config locations
636
+ func (f *WeaviateConfig) GetHostAddress() string {
637
+ return fmt.Sprintf("%s://%s", f.Scheme, f.Hostname)
638
+ }
639
+
640
+ // LoadConfig from config locations. The load order for configuration values if the following
641
+ // 1. Config file
642
+ // 2. Environment variables
643
+ // 3. Command line flags
644
+ // If a config option is specified multiple times in different locations, the latest one will be used in this order.
645
+ func (f *WeaviateConfig) LoadConfig(flags *swag.CommandLineOptionsGroup, logger logrus.FieldLogger) error {
646
+ // Get command line flags
647
+ configFileName := flags.Options.(*Flags).ConfigFile
648
+ // Set default if not given
649
+ if configFileName == "" {
650
+ configFileName = DefaultConfigFile
651
+ }
652
+
653
+ // Read config file
654
+ file, err := os.ReadFile(configFileName)
655
+ _ = err // explicitly ignore
656
+
657
+ // Load config from config file if present
658
+ if len(file) > 0 {
659
+ logger.WithField("action", "config_load").WithField("config_file_path", configFileName).
660
+ Info("Usage of the weaviate.conf.json file is deprecated and will be removed in the future. Please use environment variables.")
661
+ config, err := f.parseConfigFile(file, configFileName)
662
+ if err != nil {
663
+ return configErr(err)
664
+ }
665
+ f.Config = config
666
+
667
+ deprecations.Log(logger, "config-files")
668
+ }
669
+
670
+ // Load config from env
671
+ if err := FromEnv(&f.Config); err != nil {
672
+ return configErr(err)
673
+ }
674
+
675
+ // Load config from flags
676
+ f.fromFlags(flags.Options.(*Flags))
677
+
678
+ return f.Config.Validate()
679
+ }
680
+
681
+ func (f *WeaviateConfig) parseConfigFile(file []byte, name string) (Config, error) {
682
+ var config Config
683
+
684
+ m := regexp.MustCompile(`.*\.(\w+)$`).FindStringSubmatch(name)
685
+ if len(m) < 2 {
686
+ return config, fmt.Errorf("config file does not have a file ending, got '%s'", name)
687
+ }
688
+
689
+ switch m[1] {
690
+ case "json":
691
+ err := json.Unmarshal(file, &config)
692
+ if err != nil {
693
+ return config, fmt.Errorf("error unmarshalling the json config file: %w", err)
694
+ }
695
+ case "yaml":
696
+ err := yaml.Unmarshal(file, &config)
697
+ if err != nil {
698
+ return config, fmt.Errorf("error unmarshalling the yaml config file: %w", err)
699
+ }
700
+ default:
701
+ return config, fmt.Errorf("unsupported config file extension '%s', use .yaml or .json", m[1])
702
+ }
703
+
704
+ return config, nil
705
+ }
706
+
707
+ // fromFlags parses values from flags given as parameter and overrides values in the config
708
+ func (f *WeaviateConfig) fromFlags(flags *Flags) {
709
+ if flags.RaftPort > 0 {
710
+ f.Config.Raft.Port = flags.RaftPort
711
+ }
712
+ if flags.RaftInternalRPCPort > 0 {
713
+ f.Config.Raft.InternalRPCPort = flags.RaftInternalRPCPort
714
+ }
715
+ if flags.RaftRPCMessageMaxSize > 0 {
716
+ f.Config.Raft.RPCMessageMaxSize = flags.RaftRPCMessageMaxSize
717
+ }
718
+ if flags.RaftJoin != nil {
719
+ f.Config.Raft.Join = flags.RaftJoin
720
+ }
721
+ if flags.RaftBootstrapTimeout > 0 {
722
+ f.Config.Raft.BootstrapTimeout = time.Second * time.Duration(flags.RaftBootstrapTimeout)
723
+ }
724
+ if flags.RaftBootstrapExpect > 0 {
725
+ f.Config.Raft.BootstrapExpect = flags.RaftBootstrapExpect
726
+ }
727
+ if flags.RaftSnapshotInterval > 0 {
728
+ f.Config.Raft.SnapshotInterval = time.Second * time.Duration(flags.RaftSnapshotInterval)
729
+ }
730
+ if flags.RaftSnapshotThreshold > 0 {
731
+ f.Config.Raft.SnapshotThreshold = uint64(flags.RaftSnapshotThreshold)
732
+ }
733
+ if flags.RaftMetadataOnlyVoters {
734
+ f.Config.Raft.MetadataOnlyVoters = true
735
+ }
736
+
737
+ if flags.RuntimeOverridesEnabled {
738
+ f.Config.RuntimeOverrides.Enabled = flags.RuntimeOverridesEnabled
739
+ }
740
+
741
+ if flags.RuntimeOverridesPath != "" {
742
+ f.Config.RuntimeOverrides.Path = flags.RuntimeOverridesPath
743
+ }
744
+
745
+ if flags.RuntimeOverridesLoadInterval > 0 {
746
+ f.Config.RuntimeOverrides.LoadInterval = flags.RuntimeOverridesLoadInterval
747
+ }
748
+ }
749
+
750
// configErr wraps err so callers see it marked as a configuration error while
// keeping the original error available via errors.Unwrap/Is/As.
func configErr(err error) error {
	return fmt.Errorf("invalid config: %w", err)
}
platform/dbops/binaries/weaviate-src/usecases/config/config_handler_test.go ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "os"
17
+ "testing"
18
+
19
+ "github.com/weaviate/weaviate/usecases/auth/authorization/rbac/rbacconf"
20
+
21
+ "github.com/stretchr/testify/assert"
22
+ "github.com/stretchr/testify/require"
23
+ )
24
+
25
+ func TestConfigModules(t *testing.T) {
26
+ t.Run("invalid DefaultVectorDistanceMetric", func(t *testing.T) {
27
+ moduleProvider := &fakeModuleProvider{
28
+ valid: []string{"text2vec-contextionary"},
29
+ }
30
+ config := Config{
31
+ DefaultVectorizerModule: "text2vec-contextionary",
32
+ DefaultVectorDistanceMetric: "euclidean",
33
+ }
34
+ err := config.ValidateModules(moduleProvider)
35
+ assert.EqualError(
36
+ t,
37
+ err,
38
+ "default vector distance metric: must be one of [\"cosine\", \"dot\", \"l2-squared\", \"manhattan\",\"hamming\"]",
39
+ )
40
+ })
41
+
42
+ t.Run("invalid DefaultVectorizerModule", func(t *testing.T) {
43
+ moduleProvider := &fakeModuleProvider{
44
+ valid: []string{"text2vec-contextionary"},
45
+ }
46
+ config := Config{
47
+ DefaultVectorizerModule: "contextionary",
48
+ DefaultVectorDistanceMetric: "cosine",
49
+ }
50
+ err := config.ValidateModules(moduleProvider)
51
+ assert.EqualError(
52
+ t,
53
+ err,
54
+ "default vectorizer module: invalid vectorizer \"contextionary\"",
55
+ )
56
+ })
57
+
58
+ t.Run("all valid configurations", func(t *testing.T) {
59
+ moduleProvider := &fakeModuleProvider{
60
+ valid: []string{"text2vec-contextionary"},
61
+ }
62
+ config := Config{
63
+ DefaultVectorizerModule: "text2vec-contextionary",
64
+ DefaultVectorDistanceMetric: "l2-squared",
65
+ }
66
+ err := config.ValidateModules(moduleProvider)
67
+ assert.Nil(t, err, "should not error")
68
+ })
69
+
70
+ t.Run("without DefaultVectorDistanceMetric", func(t *testing.T) {
71
+ moduleProvider := &fakeModuleProvider{
72
+ valid: []string{"text2vec-contextionary"},
73
+ }
74
+ config := Config{
75
+ DefaultVectorizerModule: "text2vec-contextionary",
76
+ }
77
+ err := config.ValidateModules(moduleProvider)
78
+ assert.Nil(t, err, "should not error")
79
+ })
80
+
81
+ t.Run("with none DefaultVectorizerModule", func(t *testing.T) {
82
+ moduleProvider := &fakeModuleProvider{
83
+ valid: []string{"text2vec-contextionary"},
84
+ }
85
+ config := Config{
86
+ DefaultVectorizerModule: "none",
87
+ }
88
+ err := config.ValidateModules(moduleProvider)
89
+ assert.Nil(t, err, "should not error")
90
+ })
91
+
92
+ t.Run("parse config.yaml file", func(t *testing.T) {
93
+ configFileName := "config.yaml"
94
+ configYaml := `authentication:
95
+ apikey:
96
+ enabled: true
97
+ allowed_keys:
98
+ - api-key-1
99
+ users:
100
+ - readonly@weaviate.io`
101
+
102
+ filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName)
103
+ f, err := os.Create(filepath)
104
+ require.Nil(t, err)
105
+ defer f.Close()
106
+ _, err2 := f.WriteString(configYaml)
107
+ require.Nil(t, err2)
108
+
109
+ file, err := os.ReadFile(filepath)
110
+ require.Nil(t, err)
111
+ weaviateConfig := &WeaviateConfig{}
112
+ config, err := weaviateConfig.parseConfigFile(file, configFileName)
113
+ require.Nil(t, err)
114
+
115
+ assert.True(t, config.Authentication.APIKey.Enabled)
116
+ assert.ElementsMatch(t, []string{"api-key-1"}, config.Authentication.APIKey.AllowedKeys)
117
+ assert.ElementsMatch(t, []string{"readonly@weaviate.io"}, config.Authentication.APIKey.Users)
118
+ })
119
+ }
120
+
121
+ func TestConfigParsing(t *testing.T) {
122
+ t.Run("parse config.yaml with oidc config - yaml", func(t *testing.T) {
123
+ configFileName := "config.yaml"
124
+ configYaml := `authentication:
125
+ oidc:
126
+ enabled: true
127
+ issuer: http://localhost:9090/auth/realms/weaviate
128
+ username_claim: preferred_username
129
+ groups_claim: groups
130
+ client_id: demo
131
+ skip_client_id_check: false
132
+ scopes: ['email', 'openid']
133
+ certificate: "valid-certificate"
134
+ `
135
+
136
+ filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName)
137
+ f, err := os.Create(filepath)
138
+ require.Nil(t, err)
139
+ defer f.Close()
140
+ _, err2 := f.WriteString(configYaml)
141
+ require.Nil(t, err2)
142
+
143
+ file, err := os.ReadFile(filepath)
144
+ require.Nil(t, err)
145
+ weaviateConfig := &WeaviateConfig{}
146
+ config, err := weaviateConfig.parseConfigFile(file, configFileName)
147
+ require.Nil(t, err)
148
+
149
+ assert.True(t, config.Authentication.OIDC.Enabled)
150
+ assert.Equal(t, "http://localhost:9090/auth/realms/weaviate", config.Authentication.OIDC.Issuer.Get())
151
+ assert.Equal(t, "preferred_username", config.Authentication.OIDC.UsernameClaim.Get())
152
+ assert.Equal(t, "groups", config.Authentication.OIDC.GroupsClaim.Get())
153
+ assert.Equal(t, "demo", config.Authentication.OIDC.ClientID.Get())
154
+ assert.False(t, config.Authentication.OIDC.SkipClientIDCheck.Get())
155
+ assert.ElementsMatch(t, []string{"email", "openid"}, config.Authentication.OIDC.Scopes.Get())
156
+ assert.Equal(t, "valid-certificate", config.Authentication.OIDC.Certificate.Get())
157
+ })
158
+
159
+ t.Run("parse config.yaml with oidc config - json", func(t *testing.T) {
160
+ configFileName := "config.json"
161
+ configYaml := `{
162
+ "authentication": {
163
+ "oidc": {
164
+ "enabled": true,
165
+ "issuer": "http://localhost:9090/auth/realms/weaviate",
166
+ "username_claim": "preferred_username",
167
+ "groups_claim": "groups",
168
+ "client_id": "demo",
169
+ "skip_client_id_check": false,
170
+ "scopes": ["email", "openid"],
171
+ "certificate": "valid-certificate"
172
+ }
173
+ }
174
+ }
175
+ `
176
+
177
+ filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName)
178
+ f, err := os.Create(filepath)
179
+ require.Nil(t, err)
180
+ defer f.Close()
181
+ _, err2 := f.WriteString(configYaml)
182
+ require.Nil(t, err2)
183
+
184
+ file, err := os.ReadFile(filepath)
185
+ require.Nil(t, err)
186
+ weaviateConfig := &WeaviateConfig{}
187
+ config, err := weaviateConfig.parseConfigFile(file, configFileName)
188
+ require.Nil(t, err)
189
+
190
+ assert.True(t, config.Authentication.OIDC.Enabled)
191
+ assert.Equal(t, "http://localhost:9090/auth/realms/weaviate", config.Authentication.OIDC.Issuer.Get())
192
+ assert.Equal(t, "preferred_username", config.Authentication.OIDC.UsernameClaim.Get())
193
+ assert.Equal(t, "groups", config.Authentication.OIDC.GroupsClaim.Get())
194
+ assert.Equal(t, "demo", config.Authentication.OIDC.ClientID.Get())
195
+ assert.False(t, config.Authentication.OIDC.SkipClientIDCheck.Get())
196
+ assert.ElementsMatch(t, []string{"email", "openid"}, config.Authentication.OIDC.Scopes.Get())
197
+ assert.Equal(t, "valid-certificate", config.Authentication.OIDC.Certificate.Get())
198
+ })
199
+
200
+ t.Run("parse config.yaml file with admin_list and read_only_users", func(t *testing.T) {
201
+ configFileName := "config.yaml"
202
+ configYaml := `authorization:
203
+ admin_list:
204
+ enabled: true
205
+ users:
206
+ - userA
207
+ read_only_users:
208
+ - userA@read.only
209
+ - userB@read.only`
210
+
211
+ filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName)
212
+ f, err := os.Create(filepath)
213
+ require.Nil(t, err)
214
+ defer f.Close()
215
+ _, err2 := f.WriteString(configYaml)
216
+ require.Nil(t, err2)
217
+
218
+ file, err := os.ReadFile(filepath)
219
+ require.Nil(t, err)
220
+ weaviateConfig := &WeaviateConfig{}
221
+ config, err := weaviateConfig.parseConfigFile(file, configFileName)
222
+ require.Nil(t, err)
223
+
224
+ assert.True(t, config.Authorization.AdminList.Enabled)
225
+ assert.ElementsMatch(t, []string{"userA"}, config.Authorization.AdminList.Users)
226
+ assert.ElementsMatch(t, []string{"userA@read.only", "userB@read.only"}, config.Authorization.AdminList.ReadOnlyUsers)
227
+ })
228
+
229
+ t.Run("parse config.yaml file multiple keys and users", func(t *testing.T) {
230
+ configFileName := "config.yaml"
231
+ configYaml := `authentication:
232
+ apikey:
233
+ enabled: true
234
+ allowed_keys:
235
+ - api-key-1
236
+ - api-key-2
237
+ - api-key-3
238
+ users:
239
+ - user1@weaviate.io
240
+ - user2@weaviate.io`
241
+
242
+ filepath := fmt.Sprintf("%s/%s", t.TempDir(), configFileName)
243
+ f, err := os.Create(filepath)
244
+ require.Nil(t, err)
245
+ defer f.Close()
246
+ _, err2 := f.WriteString(configYaml)
247
+ require.Nil(t, err2)
248
+
249
+ file, err := os.ReadFile(filepath)
250
+ require.Nil(t, err)
251
+ weaviateConfig := &WeaviateConfig{}
252
+ config, err := weaviateConfig.parseConfigFile(file, configFileName)
253
+ require.Nil(t, err)
254
+
255
+ assert.True(t, config.Authentication.APIKey.Enabled)
256
+ assert.ElementsMatch(t, []string{"api-key-1", "api-key-2", "api-key-3"}, config.Authentication.APIKey.AllowedKeys)
257
+ assert.ElementsMatch(t, []string{"user1@weaviate.io", "user2@weaviate.io"}, config.Authentication.APIKey.Users)
258
+ })
259
+ }
260
+
261
+ func TestConfigValidation(t *testing.T) {
262
+ tests := []struct {
263
+ name string
264
+ config *Config
265
+ expected bool
266
+ }{
267
+ {
268
+ name: "invalid combination of rbac and anon access",
269
+ config: &Config{
270
+ Authentication: Authentication{AnonymousAccess: AnonymousAccess{Enabled: true}},
271
+ Authorization: Authorization{Rbac: rbacconf.Config{Enabled: true}},
272
+ },
273
+ expected: true,
274
+ },
275
+ {
276
+ name: "valid combination of anon access and no authorization",
277
+ config: &Config{
278
+ Authentication: Authentication{AnonymousAccess: AnonymousAccess{Enabled: true}},
279
+ },
280
+ expected: true,
281
+ },
282
+ }
283
+
284
+ for _, test := range tests {
285
+ t.Run(test.name, func(t *testing.T) {
286
+ err := test.config.Validate()
287
+ if test.expected {
288
+ assert.Error(t, err)
289
+ } else {
290
+ assert.NoError(t, err)
291
+ }
292
+ })
293
+ }
294
+ }
platform/dbops/binaries/weaviate-src/usecases/config/environment.go ADDED
@@ -0,0 +1,1629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "math"
17
+ "os"
18
+ "regexp"
19
+ "slices"
20
+ "strconv"
21
+ "strings"
22
+ "time"
23
+
24
+ dbhelpers "github.com/weaviate/weaviate/adapters/repos/db/helpers"
25
+ entcfg "github.com/weaviate/weaviate/entities/config"
26
+ "github.com/weaviate/weaviate/entities/errorcompounder"
27
+ "github.com/weaviate/weaviate/entities/schema"
28
+ "github.com/weaviate/weaviate/entities/sentry"
29
+ "github.com/weaviate/weaviate/usecases/cluster"
30
+ "github.com/weaviate/weaviate/usecases/config/runtime"
31
+ )
32
+
33
+ const (
34
+ DefaultRaftPort = 8300
35
+ DefaultRaftInternalPort = 8301
36
+ DefaultRaftGRPCMaxSize = 1024 * 1024 * 1024
37
+ // DefaultRaftBootstrapTimeout is the time raft will wait to bootstrap or rejoin the cluster on a restart. We set it
38
+ // to 600 because if we're loading a large DB we need to wait for it to load before being able to join the cluster
39
+ // on a single node cluster.
40
+ DefaultRaftBootstrapTimeout = 600
41
+ DefaultRaftBootstrapExpect = 1
42
+ DefaultRaftDir = "raft"
43
+ DefaultHNSWAcornFilterRatio = 0.4
44
+
45
+ DefaultRuntimeOverridesLoadInterval = 2 * time.Minute
46
+
47
+ DefaultDistributedTasksSchedulerTickInterval = time.Minute
48
+ DefaultDistributedTasksCompletedTaskTTL = 5 * 24 * time.Hour
49
+
50
+ DefaultReplicationEngineMaxWorkers = 10
51
+ DefaultReplicaMovementMinimumAsyncWait = 60 * time.Second
52
+ DefaultReplicationEngineFileCopyWorkers = 10
53
+
54
+ DefaultTransferInactivityTimeout = 5 * time.Minute
55
+
56
+ DefaultTrackVectorDimensionsInterval = 5 * time.Minute
57
+ )
58
+
59
+ // FromEnv takes a *Config as it will respect initial config that has been
60
+ // provided by other means (e.g. a config file) and will only extend those that
61
+ // are set
// FromEnv takes a *Config as it will respect initial config that has been
// provided by other means (e.g. a config file) and will only extend those that
// are set.
//
// Environment variables are read one by one, in a fixed order; parse failures
// abort immediately with a wrapped error, so a returned nil error means every
// set variable was valid. When a variable is unset, either the existing config
// value is kept or the package-level Default* constant is applied — the exact
// behavior varies per field and is documented inline below.
func FromEnv(config *Config) error {
	// ---- Prometheus monitoring ----
	if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_ENABLED")) {
		config.Monitoring.Enabled = true
		config.Monitoring.Tool = "prometheus"
		config.Monitoring.Port = 2112
		config.Monitoring.MetricsNamespace = "" // to support backward compatibility. Metric names won't have prefix by default.

		if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_GROUP_CLASSES")) ||
			entcfg.Enabled(os.Getenv("PROMETHEUS_MONITORING_GROUP")) {
			// The variable was renamed with v1.20. Prior to v1.20 the recommended
			// way to do MT was using classes. This lead to a lot of metrics which
			// could be grouped with this variable. With v1.20 we introduced native
			// multi-tenancy. Now all you need is a single class, but you would
			// still get one set of metrics per shard. To prevent this, you still
			// want to group. The new name reflects that it's just about grouping,
			// not about classes or shards.
			config.Monitoring.Group = true
		}

		if val := strings.TrimSpace(os.Getenv("PROMETHEUS_MONITORING_METRIC_NAMESPACE")); val != "" {
			config.Monitoring.MetricsNamespace = val
		}

		if entcfg.Enabled(os.Getenv("PROMETHEUS_MONITOR_CRITICAL_BUCKETS_ONLY")) {
			config.Monitoring.MonitorCriticalBucketsOnly = true
		}
	}

	if entcfg.Enabled(os.Getenv("TRACK_VECTOR_DIMENSIONS")) {
		config.TrackVectorDimensions = true
	}

	// MINIMUM_INTERNAL_TIMEOUT falls back to 30s when unset; note this
	// overwrites any value previously present in config.
	timeout := 30 * time.Second
	opt := os.Getenv("MINIMUM_INTERNAL_TIMEOUT")
	if opt != "" {
		if parsed, err := time.ParseDuration(opt); err == nil {
			timeout = parsed
		} else {
			return fmt.Errorf("parse MINIMUM_INTERNAL_TIMEOUT as duration: %w", err)
		}
	}

	config.MinimumInternalTimeout = timeout

	if v := os.Getenv("TRACK_VECTOR_DIMENSIONS_INTERVAL"); v != "" {
		interval, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse TRACK_VECTOR_DIMENSIONS_INTERVAL as duration: %w", err)
		}
		config.TrackVectorDimensionsInterval = interval
	} else {
		config.TrackVectorDimensionsInterval = DefaultTrackVectorDimensionsInterval
	}

	if entcfg.Enabled(os.Getenv("REINDEX_VECTOR_DIMENSIONS_AT_STARTUP")) {
		config.ReindexVectorDimensionsAtStartup = true
	}

	if entcfg.Enabled(os.Getenv("DISABLE_LAZY_LOAD_SHARDS")) {
		config.DisableLazyLoadShards = true
	}

	if entcfg.Enabled(os.Getenv("FORCE_FULL_REPLICAS_SEARCH")) {
		config.ForceFullReplicasSearch = true
	}

	if v := os.Getenv("TRANSFER_INACTIVITY_TIMEOUT"); v != "" {
		timeout, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse TRANSFER_INACTIVITY_TIMEOUT as duration: %w", err)
		}
		config.TransferInactivityTimeout = timeout
	} else {
		config.TransferInactivityTimeout = DefaultTransferInactivityTimeout
	}

	// Recount all property lengths at startup to support accurate BM25 scoring
	if entcfg.Enabled(os.Getenv("RECOUNT_PROPERTIES_AT_STARTUP")) {
		config.RecountPropertiesAtStartup = true
	}

	if entcfg.Enabled(os.Getenv("REINDEX_SET_TO_ROARINGSET_AT_STARTUP")) {
		config.ReindexSetToRoaringsetAtStartup = true
	}

	if entcfg.Enabled(os.Getenv("INDEX_MISSING_TEXT_FILTERABLE_AT_STARTUP")) {
		config.IndexMissingTextFilterableAtStartup = true
	}

	// Shared parser for "Class:prop,...;Class2:prop,..." style variables,
	// reused below for REINDEX_MAP_TO_BLOCKMAX_SELECT.
	cptParser := newCollectionPropsTenantsParser()

	// variable expects string in format:
	// "Class1:property11,property12;Class2:property21,property22"
	if v := os.Getenv("REINDEX_INDEXES_AT_STARTUP"); v != "" {
		cpts, err := cptParser.parse(v)
		if err != nil {
			return fmt.Errorf("parse REINDEX_INDEXES_AT_STARTUP as class with props: %w", err)
		}

		asClassesWithProps := make(map[string][]string, len(cpts))
		for _, cpt := range cpts {
			asClassesWithProps[cpt.Collection] = cpt.Props
		}
		config.ReindexIndexesAtStartup = asClassesWithProps
	}

	if v := os.Getenv("PROMETHEUS_MONITORING_PORT"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse PROMETHEUS_MONITORING_PORT as int: %w", err)
		}

		config.Monitoring.Port = asInt
	}

	if v := os.Getenv("GO_PROFILING_PORT"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse GO_PROFILING_PORT as int: %w", err)
		}

		config.Profiling.Port = asInt
	}

	// ---- Authentication ----
	if entcfg.Enabled(os.Getenv("AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED")) {
		config.Authentication.AnonymousAccess.Enabled = true
	}

	if entcfg.Enabled(os.Getenv("AUTHENTICATION_OIDC_ENABLED")) {
		config.Authentication.OIDC.Enabled = true
		var (
			skipClientCheck bool
			issuer          string
			clientID        string
			scopes          []string
			userClaim       string
			groupsClaim     string
			certificate     string
			jwksUrl         string
		)

		if entcfg.Enabled(os.Getenv("AUTHENTICATION_OIDC_SKIP_CLIENT_ID_CHECK")) {
			skipClientCheck = true
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_ISSUER"); v != "" {
			issuer = v
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_CLIENT_ID"); v != "" {
			clientID = v
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_SCOPES"); v != "" {
			scopes = strings.Split(v, ",")
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_USERNAME_CLAIM"); v != "" {
			userClaim = v
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_GROUPS_CLAIM"); v != "" {
			groupsClaim = v
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_CERTIFICATE"); v != "" {
			certificate = v
		}

		if v := os.Getenv("AUTHENTICATION_OIDC_JWKS_URL"); v != "" {
			jwksUrl = v
		}

		// All OIDC fields are wrapped as runtime dynamic values so they can be
		// overridden at runtime; zero values are stored when env vars are unset.
		config.Authentication.OIDC.SkipClientIDCheck = runtime.NewDynamicValue(skipClientCheck)
		config.Authentication.OIDC.Issuer = runtime.NewDynamicValue(issuer)
		config.Authentication.OIDC.ClientID = runtime.NewDynamicValue(clientID)
		config.Authentication.OIDC.Scopes = runtime.NewDynamicValue(scopes)
		config.Authentication.OIDC.UsernameClaim = runtime.NewDynamicValue(userClaim)
		config.Authentication.OIDC.GroupsClaim = runtime.NewDynamicValue(groupsClaim)
		config.Authentication.OIDC.Certificate = runtime.NewDynamicValue(certificate)
		config.Authentication.OIDC.JWKSUrl = runtime.NewDynamicValue(jwksUrl)
	}

	if entcfg.Enabled(os.Getenv("AUTHENTICATION_DB_USERS_ENABLED")) {
		config.Authentication.DBUsers.Enabled = true
	}

	if entcfg.Enabled(os.Getenv("AUTHENTICATION_APIKEY_ENABLED")) {
		config.Authentication.APIKey.Enabled = true

		// LookupEnv (not Getenv) so an explicitly-set empty value still
		// replaces any previously-configured keys/users.
		if rawKeys, ok := os.LookupEnv("AUTHENTICATION_APIKEY_ALLOWED_KEYS"); ok {
			keys := strings.Split(rawKeys, ",")
			config.Authentication.APIKey.AllowedKeys = keys
		}

		if rawUsers, ok := os.LookupEnv("AUTHENTICATION_APIKEY_USERS"); ok {
			users := strings.Split(rawUsers, ",")
			config.Authentication.APIKey.Users = users
		}

	}

	// ---- Authorization: admin list ----
	if entcfg.Enabled(os.Getenv("AUTHORIZATION_ADMINLIST_ENABLED")) {
		config.Authorization.AdminList.Enabled = true

		usersString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_USERS")
		if ok {
			config.Authorization.AdminList.Users = strings.Split(usersString, ",")
		}

		roUsersString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_READONLY_USERS")
		if ok {
			config.Authorization.AdminList.ReadOnlyUsers = strings.Split(roUsersString, ",")
		}

		groupsString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_GROUPS")
		if ok {
			config.Authorization.AdminList.Groups = strings.Split(groupsString, ",")
		}

		roGroupsString, ok := os.LookupEnv("AUTHORIZATION_ADMINLIST_READONLY_GROUPS")
		if ok {
			config.Authorization.AdminList.ReadOnlyGroups = strings.Split(roGroupsString, ",")
		}
	}

	// ---- Authorization: RBAC (two enabling spellings accepted) ----
	if entcfg.Enabled(os.Getenv("AUTHORIZATION_ENABLE_RBAC")) || entcfg.Enabled(os.Getenv("AUTHORIZATION_RBAC_ENABLED")) {
		config.Authorization.Rbac.Enabled = true

		if entcfg.Enabled(os.Getenv("AUTHORIZATION_RBAC_IP_IN_AUDIT_LOG_DISABLED")) {
			config.Authorization.Rbac.IpInAuditDisabled = true
		}

		// New variable takes precedence; legacy AUTHORIZATION_ADMIN_USERS is
		// only consulted when the new one is absent.
		adminsString, ok := os.LookupEnv("AUTHORIZATION_RBAC_ROOT_USERS")
		if ok {
			config.Authorization.Rbac.RootUsers = strings.Split(adminsString, ",")
		} else {
			adminsString, ok := os.LookupEnv("AUTHORIZATION_ADMIN_USERS")
			if ok {
				config.Authorization.Rbac.RootUsers = strings.Split(adminsString, ",")
			}
		}

		groupString, ok := os.LookupEnv("AUTHORIZATION_RBAC_ROOT_GROUPS")
		if ok {
			config.Authorization.Rbac.RootGroups = strings.Split(groupString, ",")
		}

		viewerGroupString, ok := os.LookupEnv("AUTHORIZATION_RBAC_READONLY_GROUPS")
		if ok {
			config.Authorization.Rbac.ReadOnlyGroups = strings.Split(viewerGroupString, ",")
		} else {
			// delete this after 1.30.11 + 1.31.3 is the minimum version in WCD
			viewerGroupString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_READONLY_ROOT_GROUPS")
			if ok {
				config.Authorization.Rbac.ReadOnlyGroups = strings.Split(viewerGroupString, ",")
			}
		}

		readOnlyUsersString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_READONLY_USERS")
		if ok {
			config.Authorization.Rbac.ViewerUsers = strings.Split(readOnlyUsersString, ",")
		}

		adminUsersString, ok := os.LookupEnv("EXPERIMENTAL_AUTHORIZATION_RBAC_ADMIN_USERS")
		if ok {
			config.Authorization.Rbac.AdminUsers = strings.Split(adminUsersString, ",")
		}
	}

	config.Profiling.Disabled = entcfg.Enabled(os.Getenv("GO_PROFILING_DISABLE"))

	// If no auth method was selected by env or file, fall back to the package
	// default authentication setup.
	if !config.Authentication.AnyAuthMethodSelected() {
		config.Authentication = DefaultAuthentication
	}

	// ---- Persistence / LSM ----
	if os.Getenv("PERSISTENCE_LSM_ACCESS_STRATEGY") == "pread" {
		config.AvoidMmap = true
	}

	if v := os.Getenv("PERSISTENCE_LSM_MAX_SEGMENT_SIZE"); v != "" {
		parsed, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("parse PERSISTENCE_LSM_MAX_SEGMENT_SIZE: %w", err)
		}

		config.Persistence.LSMMaxSegmentSize = parsed
	} else {
		config.Persistence.LSMMaxSegmentSize = DefaultPersistenceLSMMaxSegmentSize
	}

	// Env var is expressed in hours; stored internally in seconds.
	if err := parseNonNegativeInt(
		"PERSISTENCE_LSM_SEGMENTS_CLEANUP_INTERVAL_HOURS",
		func(hours int) { config.Persistence.LSMSegmentsCleanupIntervalSeconds = hours * 3600 },
		DefaultPersistenceLSMSegmentsCleanupIntervalSeconds,
	); err != nil {
		return err
	}

	if entcfg.Enabled(os.Getenv("PERSISTENCE_LSM_SEPARATE_OBJECTS_COMPACTIONS")) {
		config.Persistence.LSMSeparateObjectsCompactions = true
	}

	if entcfg.Enabled(os.Getenv("PERSISTENCE_LSM_ENABLE_SEGMENTS_CHECKSUM_VALIDATION")) {
		config.Persistence.LSMEnableSegmentsChecksumValidation = true
	}

	if v := os.Getenv("PERSISTENCE_MIN_MMAP_SIZE"); v != "" {
		parsed, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("parse PERSISTENCE_MIN_MMAP_SIZE: %w", err)
		}

		config.Persistence.MinMMapSize = parsed
	} else {
		config.Persistence.MinMMapSize = DefaultPersistenceMinMMapSize
	}

	if entcfg.Enabled(os.Getenv("PERSISTENCE_LAZY_SEGMENTS_DISABLED")) {
		config.Persistence.LazySegmentsDisabled = true
	}

	// These two are negated flags: the env var disables a feature that is
	// otherwise on by default.
	if entcfg.Enabled(os.Getenv("PERSISTENCE_SEGMENT_INFO_FROM_FILE_DISABLED")) {
		config.Persistence.SegmentInfoIntoFileNameEnabled = false
	} else {
		config.Persistence.SegmentInfoIntoFileNameEnabled = true
	}

	if entcfg.Enabled(os.Getenv("PERSISTENCE_WRITE_METADATA_FILES_DISABLED")) {
		config.Persistence.WriteMetadataFilesEnabled = false
	} else {
		config.Persistence.WriteMetadataFilesEnabled = true
	}

	if v := os.Getenv("PERSISTENCE_MAX_REUSE_WAL_SIZE"); v != "" {
		parsed, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("parse PERSISTENCE_MAX_REUSE_WAL_SIZE: %w", err)
		}

		config.Persistence.MaxReuseWalSize = parsed
	} else {
		config.Persistence.MaxReuseWalSize = DefaultPersistenceMaxReuseWalSize
	}

	if err := parseInt(
		"PERSISTENCE_LSM_CYCLEMANAGER_ROUTINES_FACTOR",
		func(factor int) { config.Persistence.LSMCycleManagerRoutinesFactor = factor },
		DefaultPersistenceLSMCycleManagerRoutinesFactor,
	); err != nil {
		return err
	}

	if v := os.Getenv("PERSISTENCE_HNSW_MAX_LOG_SIZE"); v != "" {
		parsed, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("parse PERSISTENCE_HNSW_MAX_LOG_SIZE: %w", err)
		}

		config.Persistence.HNSWMaxLogSize = parsed
	} else {
		config.Persistence.HNSWMaxLogSize = DefaultPersistenceHNSWMaxLogSize
	}

	// ---- HNSW snapshots ----
	config.Persistence.HNSWDisableSnapshots = DefaultHNSWSnapshotDisabled
	if v := os.Getenv("PERSISTENCE_HNSW_DISABLE_SNAPSHOTS"); v != "" {
		config.Persistence.HNSWDisableSnapshots = entcfg.Enabled(v)
	}

	if err := parseNonNegativeInt(
		"PERSISTENCE_HNSW_SNAPSHOT_INTERVAL_SECONDS",
		func(seconds int) { config.Persistence.HNSWSnapshotIntervalSeconds = seconds },
		DefaultHNSWSnapshotIntervalSeconds,
	); err != nil {
		return err
	}

	config.Persistence.HNSWSnapshotOnStartup = DefaultHNSWSnapshotOnStartup
	if v := os.Getenv("PERSISTENCE_HNSW_SNAPSHOT_ON_STARTUP"); v != "" {
		config.Persistence.HNSWSnapshotOnStartup = entcfg.Enabled(v)
	}

	if err := parsePositiveInt(
		"PERSISTENCE_HNSW_SNAPSHOT_MIN_DELTA_COMMITLOGS_NUMBER",
		func(number int) { config.Persistence.HNSWSnapshotMinDeltaCommitlogsNumber = number },
		DefaultHNSWSnapshotMinDeltaCommitlogsNumber,
	); err != nil {
		return err
	}

	if err := parseNonNegativeInt(
		"PERSISTENCE_HNSW_SNAPSHOT_MIN_DELTA_COMMITLOGS_SIZE_PERCENTAGE",
		func(percentage int) { config.Persistence.HNSWSnapshotMinDeltaCommitlogsSizePercentage = percentage },
		DefaultHNSWSnapshotMinDeltaCommitlogsSizePercentage,
	); err != nil {
		return err
	}
	// ---- HNSW snapshots ----

	defaultQuantization := ""
	if v := os.Getenv("DEFAULT_QUANTIZATION"); v != "" {
		defaultQuantization = strings.ToLower(v)
	}
	config.DefaultQuantization = runtime.NewDynamicValue(defaultQuantization)

	if entcfg.Enabled(os.Getenv("INDEX_RANGEABLE_IN_MEMORY")) {
		config.Persistence.IndexRangeableInMemory = true
	}

	if err := parseInt(
		"HNSW_VISITED_LIST_POOL_MAX_SIZE",
		func(size int) { config.HNSWVisitedListPoolMaxSize = size },
		DefaultHNSWVisitedListPoolSize,
	); err != nil {
		return err
	}

	if err := parseNonNegativeInt(
		"HNSW_FLAT_SEARCH_CONCURRENCY",
		func(val int) { config.HNSWFlatSearchConcurrency = val },
		DefaultHNSWFlatSearchConcurrency,
	); err != nil {
		return err
	}

	if err := parsePercentage(
		"HNSW_ACORN_FILTER_RATIO",
		func(val float64) { config.HNSWAcornFilterRatio = val },
		DefaultHNSWAcornFilterRatio,
	); err != nil {
		return err
	}

	// Cluster config is needed below for hostname-scoped reindex flags.
	clusterCfg, err := parseClusterConfig()
	if err != nil {
		return err
	}
	config.Cluster = clusterCfg

	if v := os.Getenv("PERSISTENCE_DATA_PATH"); v != "" {
		config.Persistence.DataPath = v
	} else {
		if config.Persistence.DataPath == "" {
			config.Persistence.DataPath = DefaultPersistenceDataPath
		}
	}

	// NOTE(review): the error return of parsePositiveFloat is discarded here,
	// unlike every other parse* call in this function — on a malformed value
	// the default is silently kept. Confirm this is intentional.
	parsePositiveFloat("REINDEXER_GOROUTINES_FACTOR",
		func(val float64) { config.ReindexerGoroutinesFactor = val },
		DefaultReindexerGoroutinesFactor)

	// ---- Map-to-BlockMax reindex flags (all scoped per hostname) ----
	if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_AT_STARTUP", clusterCfg.Hostname) {
		config.ReindexMapToBlockmaxAtStartup = true
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_SWAP_BUCKETS", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.SwapBuckets = true
		}
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_UNSWAP_BUCKETS", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.UnswapBuckets = true
		}
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_TIDY_BUCKETS", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.TidyBuckets = true
		}
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_RELOAD_SHARDS", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.ReloadShards = true
		}
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_ROLLBACK", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.Rollback = true
		}
		if enabledForHost("REINDEX_MAP_TO_BLOCKMAX_CONDITIONAL_START", clusterCfg.Hostname) {
			config.ReindexMapToBlockmaxConfig.ConditionalStart = true
		}
		// NOTE(review): the error returns of these three parsePositiveInt
		// calls are discarded — malformed values silently keep the defaults.
		parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PROCESSING_DURATION_SECONDS",
			func(val int) { config.ReindexMapToBlockmaxConfig.ProcessingDurationSeconds = val },
			DefaultMapToBlockmaxProcessingDurationSeconds)
		parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PAUSE_DURATION_SECONDS",
			func(val int) { config.ReindexMapToBlockmaxConfig.PauseDurationSeconds = val },
			DefaultMapToBlockmaxPauseDurationSeconds)
		parsePositiveInt("REINDEX_MAP_TO_BLOCKMAX_PER_OBJECT_DELAY_MILLISECONDS",
			func(val int) { config.ReindexMapToBlockmaxConfig.PerObjectDelayMilliseconds = val },
			DefaultMapToBlockmaxPerObjectDelayMilliseconds)

		cptSelected, err := cptParser.parse(os.Getenv("REINDEX_MAP_TO_BLOCKMAX_SELECT"))
		if err != nil {
			return err
		}
		config.ReindexMapToBlockmaxConfig.Selected = cptSelected
	}

	if err := config.parseMemtableConfig(); err != nil {
		return err
	}

	if err := config.parseCORSConfig(); err != nil {
		return err
	}

	if v := os.Getenv("ORIGIN"); v != "" {
		config.Origin = v
	}

	if v := os.Getenv("CONTEXTIONARY_URL"); v != "" {
		config.Contextionary.URL = v
	}

	// ---- Query defaults/limits ----
	if v := os.Getenv("QUERY_DEFAULTS_LIMIT"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse QUERY_DEFAULTS_LIMIT as int: %w", err)
		}

		config.QueryDefaults.Limit = int64(asInt)
	} else {
		if config.QueryDefaults.Limit == 0 {
			config.QueryDefaults.Limit = DefaultQueryDefaultsLimit
		}
	}

	if v := os.Getenv("QUERY_DEFAULTS_LIMIT_GRAPHQL"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse QUERY_DEFAULTS_LIMIT_GRAPHQL as int: %w", err)
		}

		config.QueryDefaults.LimitGraphQL = int64(asInt)
	} else {
		if config.QueryDefaults.LimitGraphQL == 0 {
			config.QueryDefaults.LimitGraphQL = DefaultQueryDefaultsLimitGraphQL
		}
	}

	if v := os.Getenv("QUERY_MAXIMUM_RESULTS"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse QUERY_MAXIMUM_RESULTS as int: %w", err)
		}

		config.QueryMaximumResults = int64(asInt)
	} else {
		config.QueryMaximumResults = DefaultQueryMaximumResults
	}

	if v := os.Getenv("QUERY_HYBRID_MAXIMUM_RESULTS"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse QUERY_HYBRID_MAXIMUM_RESULTS as int: %w", err)
		}
		config.QueryHybridMaximumResults = int64(asInt)
	} else {
		config.QueryHybridMaximumResults = DefaultQueryHybridMaximumResults
	}

	if v := os.Getenv("QUERY_NESTED_CROSS_REFERENCE_LIMIT"); v != "" {
		limit, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return fmt.Errorf("parse QUERY_NESTED_CROSS_REFERENCE_LIMIT as int: %w", err)
		} else if limit <= 0 {
			// zero/negative means "no limit"
			limit = math.MaxInt
		}
		config.QueryNestedCrossReferenceLimit = limit
	} else {
		config.QueryNestedCrossReferenceLimit = DefaultQueryNestedCrossReferenceLimit
	}

	if err := parsePositiveInt(
		"QUERY_CROSS_REFERENCE_DEPTH_LIMIT",
		func(val int) { config.QueryCrossReferenceDepthLimit = val },
		DefaultQueryCrossReferenceDepthLimit,
	); err != nil {
		return err
	}

	if v := os.Getenv("MAX_IMPORT_GOROUTINES_FACTOR"); v != "" {
		asFloat, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return fmt.Errorf("parse MAX_IMPORT_GOROUTINES_FACTOR as float: %w", err)
		} else if asFloat <= 0 {
			return fmt.Errorf("negative MAX_IMPORT_GOROUTINES_FACTOR factor")
		}

		config.MaxImportGoroutinesFactor = asFloat
	} else {
		config.MaxImportGoroutinesFactor = DefaultMaxImportGoroutinesFactor
	}

	if v := os.Getenv("DEFAULT_VECTORIZER_MODULE"); v != "" {
		config.DefaultVectorizerModule = v
	} else {
		// env not set, this could either mean, we already have a value from a file
		// or we explicitly want to set the value to "none"
		if config.DefaultVectorizerModule == "" {
			config.DefaultVectorizerModule = VectorizerModuleNone
		}
	}

	if v := os.Getenv("MODULES_CLIENT_TIMEOUT"); v != "" {
		timeout, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse MODULES_CLIENT_TIMEOUT as time.Duration: %w", err)
		}
		config.ModuleHttpClientTimeout = timeout
	} else {
		config.ModuleHttpClientTimeout = 50 * time.Second
	}

	if v := os.Getenv("DEFAULT_VECTOR_DISTANCE_METRIC"); v != "" {
		config.DefaultVectorDistanceMetric = v
	}

	if v := os.Getenv("ENABLE_MODULES"); v != "" {
		config.EnableModules = v
	}

	// API-based modules are on unless explicitly disabled.
	if entcfg.Enabled(os.Getenv("API_BASED_MODULES_DISABLED")) {
		config.EnableApiBasedModules = false
	} else {
		config.EnableApiBasedModules = true
	}

	// ---- Auto-schema ----
	// Anything other than the literal (case-insensitive) "false" counts as
	// enabled, including unset.
	autoSchemaEnabled := true
	if v := os.Getenv("AUTOSCHEMA_ENABLED"); v != "" {
		autoSchemaEnabled = !(strings.ToLower(v) == "false")
	}
	config.AutoSchema.Enabled = runtime.NewDynamicValue(autoSchemaEnabled)

	config.AutoSchema.DefaultString = schema.DataTypeText.String()
	if v := os.Getenv("AUTOSCHEMA_DEFAULT_STRING"); v != "" {
		config.AutoSchema.DefaultString = v
	}
	config.AutoSchema.DefaultNumber = "number"
	if v := os.Getenv("AUTOSCHEMA_DEFAULT_NUMBER"); v != "" {
		config.AutoSchema.DefaultNumber = v
	}
	config.AutoSchema.DefaultDate = "date"
	if v := os.Getenv("AUTOSCHEMA_DEFAULT_DATE"); v != "" {
		config.AutoSchema.DefaultDate = v
	}

	// Tenant activity log levels default to "debug" when unset.
	tenantActivityReadLogLevel := "debug"
	if v := os.Getenv("TENANT_ACTIVITY_READ_LOG_LEVEL"); v != "" {
		tenantActivityReadLogLevel = v
	}
	config.TenantActivityReadLogLevel = runtime.NewDynamicValue(tenantActivityReadLogLevel)

	tenantActivityWriteLogLevel := "debug"
	if v := os.Getenv("TENANT_ACTIVITY_WRITE_LOG_LEVEL"); v != "" {
		tenantActivityWriteLogLevel = v
	}
	config.TenantActivityWriteLogLevel = runtime.NewDynamicValue(tenantActivityWriteLogLevel)

	ru, err := parseResourceUsageEnvVars()
	if err != nil {
		return err
	}
	config.ResourceUsage = ru

	if v := os.Getenv("GO_BLOCK_PROFILE_RATE"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse GO_BLOCK_PROFILE_RATE as int: %w", err)
		}

		config.Profiling.BlockProfileRate = asInt
	}

	if v := os.Getenv("GO_MUTEX_PROFILE_FRACTION"); v != "" {
		asInt, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("parse GO_MUTEX_PROFILE_FRACTION as int: %w", err)
		}

		config.Profiling.MutexProfileFraction = asInt
	}

	if v := os.Getenv("MAXIMUM_CONCURRENT_GET_REQUESTS"); v != "" {
		asInt, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return fmt.Errorf("parse MAXIMUM_CONCURRENT_GET_REQUESTS as int: %w", err)
		}
		config.MaximumConcurrentGetRequests = int(asInt)
	} else {
		config.MaximumConcurrentGetRequests = DefaultMaxConcurrentGetRequests
	}

	if err = parsePositiveInt(
		"MAXIMUM_CONCURRENT_SHARD_LOADS",
		func(val int) { config.MaximumConcurrentShardLoads = val },
		DefaultMaxConcurrentShardLoads,
	); err != nil {
		return err
	}

	// ---- gRPC ----
	if err := parsePositiveInt(
		"GRPC_MAX_MESSAGE_SIZE",
		func(val int) { config.GRPC.MaxMsgSize = val },
		DefaultGRPCMaxMsgSize,
	); err != nil {
		return err
	}
	if err := parsePositiveInt(
		"GRPC_PORT",
		func(val int) { config.GRPC.Port = val },
		DefaultGRPCPort,
	); err != nil {
		return err
	}
	config.GRPC.CertFile = ""
	if v := os.Getenv("GRPC_CERT_FILE"); v != "" {
		config.GRPC.CertFile = v
	}
	config.GRPC.KeyFile = ""
	if v := os.Getenv("GRPC_KEY_FILE"); v != "" {
		config.GRPC.KeyFile = v
	}

	config.DisableGraphQL = entcfg.Enabled(os.Getenv("DISABLE_GRAPHQL"))

	if config.Raft, err = parseRAFTConfig(config.Cluster.Hostname); err != nil {
		return fmt.Errorf("parse raft config: %w", err)
	}

	// ---- Replication ----
	if err := parsePositiveInt(
		"REPLICATION_MINIMUM_FACTOR",
		func(val int) { config.Replication.MinimumFactor = val },
		DefaultMinimumReplicationFactor,
	); err != nil {
		return err
	}

	config.Replication.AsyncReplicationDisabled = runtime.NewDynamicValue(entcfg.Enabled(os.Getenv("ASYNC_REPLICATION_DISABLED")))

	if v := os.Getenv("REPLICATION_FORCE_DELETION_STRATEGY"); v != "" {
		config.Replication.DeletionStrategy = v
	}

	config.DisableTelemetry = false
	if entcfg.Enabled(os.Getenv("DISABLE_TELEMETRY")) {
		config.DisableTelemetry = true
	}

	if entcfg.Enabled(os.Getenv("HNSW_STARTUP_WAIT_FOR_VECTOR_CACHE")) {
		config.HNSWStartupWaitForVectorCache = true
	}

	if err := parseInt(
		"MAXIMUM_ALLOWED_COLLECTIONS_COUNT",
		func(val int) {
			config.SchemaHandlerConfig.MaximumAllowedCollectionsCount = runtime.NewDynamicValue(val)
		},
		DefaultMaximumAllowedCollectionsCount,
	); err != nil {
		return err
	}

	// explicitly reset sentry config
	sentry.Config = nil
	config.Sentry, err = sentry.InitSentryConfig()
	if err != nil {
		return fmt.Errorf("parse sentry config from env: %w", err)
	}

	// ---- Experimental metadata server ----
	config.MetadataServer.Enabled = false
	if entcfg.Enabled(os.Getenv("EXPERIMENTAL_METADATA_SERVER_ENABLED")) {
		config.MetadataServer.Enabled = true
	}
	config.MetadataServer.GrpcListenAddress = DefaultMetadataServerGrpcListenAddress
	if v := os.Getenv("EXPERIMENTAL_METADATA_SERVER_GRPC_LISTEN_ADDRESS"); v != "" {
		config.MetadataServer.GrpcListenAddress = v
	}
	if err := parsePositiveInt(
		"EXPERIMENTAL_METADATA_SERVER_DATA_EVENTS_CHANNEL_CAPACITY",
		func(val int) { config.MetadataServer.DataEventsChannelCapacity = val },
		DefaultMetadataServerDataEventsChannelCapacity,
	); err != nil {
		return err
	}

	// ---- Runtime overrides ----
	config.RuntimeOverrides.Enabled = entcfg.Enabled(os.Getenv("RUNTIME_OVERRIDES_ENABLED"))

	if v := os.Getenv("RUNTIME_OVERRIDES_PATH"); v != "" {
		config.RuntimeOverrides.Path = v
	}

	config.RuntimeOverrides.LoadInterval = DefaultRuntimeOverridesLoadInterval
	if v := os.Getenv("RUNTIME_OVERRIDES_LOAD_INTERVAL"); v != "" {
		interval, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse RUNTIME_OVERRIDES_LOAD_INTERVAL as time.Duration: %w", err)
		}
		config.RuntimeOverrides.LoadInterval = interval
	}

	// ---- Distributed tasks ----
	if err = parsePositiveInt(
		"DISTRIBUTED_TASKS_SCHEDULER_TICK_INTERVAL_SECONDS",
		func(val int) { config.DistributedTasks.SchedulerTickInterval = time.Duration(val) * time.Second },
		int(DefaultDistributedTasksSchedulerTickInterval.Seconds()),
	); err != nil {
		return err
	}

	if err = parsePositiveInt(
		"DISTRIBUTED_TASKS_COMPLETED_TASK_TTL_HOURS",
		func(val int) { config.DistributedTasks.CompletedTaskTTL = time.Duration(val) * time.Hour },
		int(DefaultDistributedTasksCompletedTaskTTL.Hours()),
	); err != nil {
		return err
	}

	if v := os.Getenv("DISTRIBUTED_TASKS_ENABLED"); v != "" {
		config.DistributedTasks.Enabled = entcfg.Enabled(v)
	}

	// ---- Replica movement ----
	if v := os.Getenv("REPLICA_MOVEMENT_DISABLED"); v != "" {
		config.ReplicaMovementDisabled = entcfg.Enabled(v)
	}

	if v := os.Getenv("REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT"); v != "" {
		duration, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT as time.Duration: %w", err)
		}
		if duration < 0 {
			return fmt.Errorf("REPLICA_MOVEMENT_MINIMUM_ASYNC_WAIT must be a positive duration")
		}
		config.ReplicaMovementMinimumAsyncWait = runtime.NewDynamicValue(duration)
	} else {
		config.ReplicaMovementMinimumAsyncWait = runtime.NewDynamicValue(DefaultReplicaMovementMinimumAsyncWait)
	}
	// NOTE(review): local name is misspelled ("revoctorize" -> "revectorize");
	// purely cosmetic, behavior unaffected. Any value other than the literal
	// "false" (case-insensitive) disables the check.
	revoctorizeCheckDisabled := false
	if v := os.Getenv("REVECTORIZE_CHECK_DISABLED"); v != "" {
		revoctorizeCheckDisabled = !(strings.ToLower(v) == "false")
	}
	config.RevectorizeCheckDisabled = runtime.NewDynamicValue(revoctorizeCheckDisabled)

	// ---- Query slow log ----
	querySlowLogEnabled := entcfg.Enabled(os.Getenv("QUERY_SLOW_LOG_ENABLED"))
	config.QuerySlowLogEnabled = runtime.NewDynamicValue(querySlowLogEnabled)

	querySlowLogThreshold := dbhelpers.DefaultSlowLogThreshold
	if v := os.Getenv("QUERY_SLOW_LOG_THRESHOLD"); v != "" {
		threshold, err := time.ParseDuration(v)
		if err != nil {
			return fmt.Errorf("parse QUERY_SLOW_LOG_THRESHOLD as time.Duration: %w", err)
		}
		querySlowLogThreshold = threshold
	}
	config.QuerySlowLogThreshold = runtime.NewDynamicValue(querySlowLogThreshold)

	// ---- Query bitmap buffers (sizes accept resource strings, e.g. "512MiB") ----
	envName := "QUERY_BITMAP_BUFS_MAX_MEMORY"
	config.QueryBitmapBufsMaxMemory = DefaultQueryBitmapBufsMaxMemory
	if v := os.Getenv(envName); v != "" {
		bytes, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("%s: %w", envName, err)
		}
		config.QueryBitmapBufsMaxMemory = int(bytes)
	}

	envName = "QUERY_BITMAP_BUFS_MAX_BUF_SIZE"
	config.QueryBitmapBufsMaxBufSize = DefaultQueryBitmapBufsMaxBufSize
	if v := os.Getenv(envName); v != "" {
		bytes, err := parseResourceString(v)
		if err != nil {
			return fmt.Errorf("%s: %w", envName, err)
		}
		config.QueryBitmapBufsMaxBufSize = int(bytes)
	}

	// Any value other than the literal "false" (case-insensitive) disables
	// the inverted sorter.
	invertedSorterDisabled := false
	if v := os.Getenv("INVERTED_SORTER_DISABLED"); v != "" {
		invertedSorterDisabled = !(strings.ToLower(v) == "false")
	}
	config.InvertedSorterDisabled = runtime.NewDynamicValue(invertedSorterDisabled)

	return nil
}
937
+
938
+ func parseRAFTConfig(hostname string) (Raft, error) {
939
+ // flag.IntVar()
940
+ cfg := Raft{
941
+ MetadataOnlyVoters: entcfg.Enabled(os.Getenv("RAFT_METADATA_ONLY_VOTERS")),
942
+ }
943
+
944
+ if err := parsePositiveInt(
945
+ "RAFT_PORT",
946
+ func(val int) { cfg.Port = val },
947
+ DefaultRaftPort,
948
+ ); err != nil {
949
+ return cfg, err
950
+ }
951
+
952
+ if err := parsePositiveInt(
953
+ "RAFT_INTERNAL_RPC_PORT",
954
+ func(val int) { cfg.InternalRPCPort = val },
955
+ DefaultRaftInternalPort,
956
+ ); err != nil {
957
+ return cfg, err
958
+ }
959
+
960
+ if err := parsePositiveInt(
961
+ "RAFT_GRPC_MESSAGE_MAX_SIZE",
962
+ func(val int) { cfg.RPCMessageMaxSize = val },
963
+ DefaultRaftGRPCMaxSize,
964
+ ); err != nil {
965
+ return cfg, err
966
+ }
967
+
968
+ parseStringList(
969
+ "RAFT_JOIN",
970
+ func(val []string) { cfg.Join = val },
971
+ // Default RAFT_JOIN must be the configured node name and the configured raft port. This allows us to have a one-node raft cluster
972
+ // able to bootstrap itself if the user doesn't pass any raft parameter.
973
+ []string{fmt.Sprintf("%s:%d", hostname, cfg.InternalRPCPort)},
974
+ )
975
+ if err := parsePositiveInt(
976
+ "RAFT_BOOTSTRAP_TIMEOUT",
977
+ func(val int) { cfg.BootstrapTimeout = time.Second * time.Duration(val) },
978
+ DefaultRaftBootstrapTimeout,
979
+ ); err != nil {
980
+ return cfg, err
981
+ }
982
+
983
+ if err := parsePositiveInt(
984
+ "RAFT_BOOTSTRAP_EXPECT",
985
+ func(val int) { cfg.BootstrapExpect = val },
986
+ DefaultRaftBootstrapExpect,
987
+ ); err != nil {
988
+ return cfg, err
989
+ }
990
+
991
+ if err := parsePositiveInt(
992
+ "RAFT_HEARTBEAT_TIMEOUT",
993
+ func(val int) { cfg.HeartbeatTimeout = time.Second * time.Duration(val) },
994
+ 1, // raft default
995
+ ); err != nil {
996
+ return cfg, err
997
+ }
998
+
999
+ if err := parsePositiveInt(
1000
+ "RAFT_ELECTION_TIMEOUT",
1001
+ func(val int) { cfg.ElectionTimeout = time.Second * time.Duration(val) },
1002
+ 1, // raft default
1003
+ ); err != nil {
1004
+ return cfg, err
1005
+ }
1006
+
1007
+ if err := parsePositiveFloat(
1008
+ "RAFT_LEADER_LEASE_TIMEOUT",
1009
+ func(val float64) { cfg.LeaderLeaseTimeout = time.Second * time.Duration(val) },
1010
+ 0.5, // raft default
1011
+ ); err != nil {
1012
+ return cfg, err
1013
+ }
1014
+
1015
+ if err := parsePositiveInt(
1016
+ "RAFT_TIMEOUTS_MULTIPLIER",
1017
+ func(val int) { cfg.TimeoutsMultiplier = val },
1018
+ 1, // raft default
1019
+ ); err != nil {
1020
+ return cfg, err
1021
+ }
1022
+
1023
+ if err := parsePositiveInt(
1024
+ "RAFT_SNAPSHOT_INTERVAL",
1025
+ func(val int) { cfg.SnapshotInterval = time.Second * time.Duration(val) },
1026
+ 120, // raft default
1027
+ ); err != nil {
1028
+ return cfg, err
1029
+ }
1030
+
1031
+ if err := parsePositiveInt(
1032
+ "RAFT_SNAPSHOT_THRESHOLD",
1033
+ func(val int) { cfg.SnapshotThreshold = uint64(val) },
1034
+ 8192, // raft default
1035
+ ); err != nil {
1036
+ return cfg, err
1037
+ }
1038
+
1039
+ if err := parsePositiveInt(
1040
+ "RAFT_TRAILING_LOGS",
1041
+ func(val int) { cfg.TrailingLogs = uint64(val) },
1042
+ 10240, // raft default
1043
+ ); err != nil {
1044
+ return cfg, err
1045
+ }
1046
+
1047
+ if err := parsePositiveInt(
1048
+ "RAFT_CONSISTENCY_WAIT_TIMEOUT",
1049
+ func(val int) { cfg.ConsistencyWaitTimeout = time.Second * time.Duration(val) },
1050
+ 10,
1051
+ ); err != nil {
1052
+ return cfg, err
1053
+ }
1054
+
1055
+ cfg.EnableOneNodeRecovery = entcfg.Enabled(os.Getenv("RAFT_ENABLE_ONE_NODE_RECOVERY"))
1056
+ cfg.ForceOneNodeRecovery = entcfg.Enabled(os.Getenv("RAFT_FORCE_ONE_NODE_RECOVERY"))
1057
+
1058
+ return cfg, nil
1059
+ }
1060
+
1061
+ func (c *Config) parseCORSConfig() error {
1062
+ if v := os.Getenv("CORS_ALLOW_ORIGIN"); v != "" {
1063
+ c.CORS.AllowOrigin = v
1064
+ } else {
1065
+ c.CORS.AllowOrigin = DefaultCORSAllowOrigin
1066
+ }
1067
+
1068
+ if v := os.Getenv("CORS_ALLOW_METHODS"); v != "" {
1069
+ c.CORS.AllowMethods = v
1070
+ } else {
1071
+ c.CORS.AllowMethods = DefaultCORSAllowMethods
1072
+ }
1073
+
1074
+ if v := os.Getenv("CORS_ALLOW_HEADERS"); v != "" {
1075
+ c.CORS.AllowHeaders = v
1076
+ } else {
1077
+ c.CORS.AllowHeaders = DefaultCORSAllowHeaders
1078
+ }
1079
+
1080
+ return nil
1081
+ }
1082
+
1083
+ func (c *Config) parseMemtableConfig() error {
1084
+ // first parse old idle name for flush value
1085
+ if err := parsePositiveInt(
1086
+ "PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER",
1087
+ func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val },
1088
+ DefaultPersistenceMemtablesFlushDirtyAfter,
1089
+ ); err != nil {
1090
+ return err
1091
+ }
1092
+ // then parse with new idle name and use previous value in case it's not set
1093
+ if err := parsePositiveInt(
1094
+ "PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS",
1095
+ func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val },
1096
+ c.Persistence.MemtablesFlushDirtyAfter,
1097
+ ); err != nil {
1098
+ return err
1099
+ }
1100
+ // then parse with dirty name and use idle value as fallback
1101
+ if err := parsePositiveInt(
1102
+ "PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS",
1103
+ func(val int) { c.Persistence.MemtablesFlushDirtyAfter = val },
1104
+ c.Persistence.MemtablesFlushDirtyAfter,
1105
+ ); err != nil {
1106
+ return err
1107
+ }
1108
+
1109
+ if err := parsePositiveInt(
1110
+ "PERSISTENCE_MEMTABLES_MAX_SIZE_MB",
1111
+ func(val int) { c.Persistence.MemtablesMaxSizeMB = val },
1112
+ DefaultPersistenceMemtablesMaxSize,
1113
+ ); err != nil {
1114
+ return err
1115
+ }
1116
+
1117
+ if err := parsePositiveInt(
1118
+ "PERSISTENCE_MEMTABLES_MIN_ACTIVE_DURATION_SECONDS",
1119
+ func(val int) { c.Persistence.MemtablesMinActiveDurationSeconds = val },
1120
+ DefaultPersistenceMemtablesMinDuration,
1121
+ ); err != nil {
1122
+ return err
1123
+ }
1124
+
1125
+ if err := parsePositiveInt(
1126
+ "PERSISTENCE_MEMTABLES_MAX_ACTIVE_DURATION_SECONDS",
1127
+ func(val int) { c.Persistence.MemtablesMaxActiveDurationSeconds = val },
1128
+ DefaultPersistenceMemtablesMaxDuration,
1129
+ ); err != nil {
1130
+ return err
1131
+ }
1132
+
1133
+ if err := parsePositiveInt(
1134
+ "REPLICATION_ENGINE_MAX_WORKERS",
1135
+ func(val int) { c.ReplicationEngineMaxWorkers = val },
1136
+ DefaultReplicationEngineMaxWorkers,
1137
+ ); err != nil {
1138
+ return err
1139
+ }
1140
+
1141
+ if err := parsePositiveInt(
1142
+ "REPLICATION_ENGINE_FILE_COPY_WORKERS",
1143
+ func(val int) { c.ReplicationEngineFileCopyWorkers = val },
1144
+ DefaultReplicationEngineFileCopyWorkers,
1145
+ ); err != nil {
1146
+ return err
1147
+ }
1148
+
1149
+ return nil
1150
+ }
1151
+
1152
+ func parsePercentage(envName string, cb func(val float64), defaultValue float64) error {
1153
+ return parseFloat64(envName, defaultValue, func(val float64) error {
1154
+ if val < 0 || val > 1 {
1155
+ return fmt.Errorf("%s must be between 0 and 1", envName)
1156
+ }
1157
+ return nil
1158
+ }, cb)
1159
+ }
1160
+
1161
// parseFloat64 reads envName as a float64, validates it with verify, and
// invokes cb with the parsed value. When the variable is unset or empty, cb
// receives defaultValue and verify is not consulted.
func parseFloat64(envName string, defaultValue float64, verify func(val float64) error, cb func(val float64)) error {
	raw := os.Getenv(envName)
	if raw == "" {
		cb(defaultValue)
		return nil
	}

	parsed, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return fmt.Errorf("parse %s as float64: %w", envName, err)
	}
	if err := verify(parsed); err != nil {
		return err
	}

	cb(parsed)
	return nil
}
1178
+
1179
+ func parseInt(envName string, cb func(val int), defaultValue int) error {
1180
+ return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error { return nil })
1181
+ }
1182
+
1183
+ func parsePositiveInt(envName string, cb func(val int), defaultValue int) error {
1184
+ return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error {
1185
+ if val <= 0 {
1186
+ return fmt.Errorf("%s must be an integer greater than 0. Got: %v", envName, val)
1187
+ }
1188
+ return nil
1189
+ })
1190
+ }
1191
+
1192
+ func parseNonNegativeInt(envName string, cb func(val int), defaultValue int) error {
1193
+ return parseIntVerify(envName, defaultValue, cb, func(val int, envName string) error {
1194
+ if val < 0 {
1195
+ return fmt.Errorf("%s must be an integer greater than or equal 0. Got %v", envName, val)
1196
+ }
1197
+ return nil
1198
+ })
1199
+ }
1200
+
1201
// parseIntVerify reads envName as an int, validates it with verify, and
// invokes cb with the parsed value. When the variable is unset or empty, cb
// receives defaultValue and verify is not consulted.
func parseIntVerify(envName string, defaultValue int, cb func(val int), verify func(val int, envName string) error) error {
	raw := os.Getenv(envName)
	if raw == "" {
		cb(defaultValue)
		return nil
	}

	parsed, err := strconv.Atoi(raw)
	if err != nil {
		return fmt.Errorf("parse %s as int: %w", envName, err)
	}
	if err := verify(parsed, envName); err != nil {
		return err
	}

	cb(parsed)
	return nil
}
1218
+
1219
+ // func parseFloat(envName string, cb func(val float64), defaultValue float64) error {
1220
+ // return parseFloatVerify(envName, defaultValue, cb, func(val float64) error { return nil })
1221
+ // }
1222
+
1223
+ func parsePositiveFloat(envName string, cb func(val float64), defaultValue float64) error {
1224
+ return parseFloatVerify(envName, defaultValue, cb, func(val float64) error {
1225
+ if val <= 0 {
1226
+ return fmt.Errorf("%s must be a float greater than 0. Got: %v", envName, val)
1227
+ }
1228
+ return nil
1229
+ })
1230
+ }
1231
+
1232
+ // func parseNonNegativeFloat(envName string, cb func(val float64), defaultValue float64) error {
1233
+ // return parseFloatVerify(envName, defaultValue, cb, func(val float64) error {
1234
+ // if val < 0 {
1235
+ // return fmt.Errorf("%s must be a float greater than or equal 0. Got %v", envName, val)
1236
+ // }
1237
+ // return nil
1238
+ // })
1239
+ // }
1240
+
1241
// parseFloatVerify reads envName as a float64, validates it with verify, and
// invokes cb with the parsed value. When the variable is unset or empty, cb
// receives defaultValue and verify is not consulted.
func parseFloatVerify(envName string, defaultValue float64, cb func(val float64), verify func(val float64) error) error {
	raw := os.Getenv(envName)
	if raw == "" {
		cb(defaultValue)
		return nil
	}

	parsed, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return fmt.Errorf("parse %s as float: %w", envName, err)
	}
	if err := verify(parsed); err != nil {
		return err
	}

	cb(parsed)
	return nil
}
1258
+
1259
// Defaults for query-related limits.
const (
	// DefaultQueryMaximumResults caps the number of results a query may return.
	DefaultQueryMaximumResults = int64(10000)
	// DefaultQueryHybridMaximumResults caps the number of results for hybrid queries.
	DefaultQueryHybridMaximumResults = int64(100)
	// DefaultQueryNestedCrossReferenceLimit describes the max number of nested crossrefs returned for a query
	DefaultQueryNestedCrossReferenceLimit = int64(100000)
	// DefaultQueryCrossReferenceDepthLimit describes the max depth of nested crossrefs in a query
	DefaultQueryCrossReferenceDepthLimit = 5

	// Defaults for the bitmap buffer pool used by queries.
	DefaultQueryBitmapBufsMaxBufSize = 1 << 25 // 32MB
	DefaultQueryBitmapBufsMaxMemory  = 1 << 27 // 128MB (2x 32MB, 2x 16MB, 2x 8MB, 2x 4MB, 4x 2MB)
)
1270
+
1271
// Defaults for persistence, request-concurrency, gRPC and schema limits.
const (
	// Memtable flush/rotation defaults (seconds / MB).
	DefaultPersistenceMemtablesFlushDirtyAfter = 60
	DefaultPersistenceMemtablesMaxSize         = 200
	DefaultPersistenceMemtablesMinDuration     = 15
	DefaultPersistenceMemtablesMaxDuration     = 45
	// DefaultMaxConcurrentGetRequests of 0 means no explicit limit.
	DefaultMaxConcurrentGetRequests = 0
	DefaultMaxConcurrentShardLoads  = 500
	DefaultGRPCPort                 = 50051
	DefaultGRPCMaxMsgSize           = 104858000 // 100 * 1024 * 1024 + 400
	DefaultMinimumReplicationFactor = 1
	DefaultMaximumAllowedCollectionsCount = -1 // unlimited
)
1283
+
1284
// VectorizerModuleNone disables vectorization for a class.
const VectorizerModuleNone = "none"

// DefaultGossipBindPort uses the hashicorp/memberlist default
// port value assigned with the use of DefaultLocalConfig
const DefaultGossipBindPort = 7946

// TODO: This should be retrieved dynamically from all installed modules
const VectorizerModuleText2VecContextionary = "text2vec-contextionary"
1292
+
1293
// parseStringList hands cb the comma-separated entries of varName, or
// defaultValue when the variable is unset or empty.
func parseStringList(varName string, cb func(val []string), defaultValue []string) {
	raw := os.Getenv(varName)
	if raw == "" {
		cb(defaultValue)
		return
	}
	cb(strings.Split(raw, ","))
}
1300
+
1301
+ func parseResourceUsageEnvVars() (ResourceUsage, error) {
1302
+ ru := ResourceUsage{}
1303
+
1304
+ if v := os.Getenv("DISK_USE_WARNING_PERCENTAGE"); v != "" {
1305
+ asUint, err := strconv.ParseUint(v, 10, 64)
1306
+ if err != nil {
1307
+ return ru, fmt.Errorf("parse DISK_USE_WARNING_PERCENTAGE as uint: %w", err)
1308
+ }
1309
+ ru.DiskUse.WarningPercentage = asUint
1310
+ } else {
1311
+ ru.DiskUse.WarningPercentage = DefaultDiskUseWarningPercentage
1312
+ }
1313
+
1314
+ if v := os.Getenv("DISK_USE_READONLY_PERCENTAGE"); v != "" {
1315
+ asUint, err := strconv.ParseUint(v, 10, 64)
1316
+ if err != nil {
1317
+ return ru, fmt.Errorf("parse DISK_USE_READONLY_PERCENTAGE as uint: %w", err)
1318
+ }
1319
+ ru.DiskUse.ReadOnlyPercentage = asUint
1320
+ } else {
1321
+ ru.DiskUse.ReadOnlyPercentage = DefaultDiskUseReadonlyPercentage
1322
+ }
1323
+
1324
+ if v := os.Getenv("MEMORY_WARNING_PERCENTAGE"); v != "" {
1325
+ asUint, err := strconv.ParseUint(v, 10, 64)
1326
+ if err != nil {
1327
+ return ru, fmt.Errorf("parse MEMORY_WARNING_PERCENTAGE as uint: %w", err)
1328
+ }
1329
+ ru.MemUse.WarningPercentage = asUint
1330
+ } else {
1331
+ ru.MemUse.WarningPercentage = DefaultMemUseWarningPercentage
1332
+ }
1333
+
1334
+ if v := os.Getenv("MEMORY_READONLY_PERCENTAGE"); v != "" {
1335
+ asUint, err := strconv.ParseUint(v, 10, 64)
1336
+ if err != nil {
1337
+ return ru, fmt.Errorf("parse MEMORY_READONLY_PERCENTAGE as uint: %w", err)
1338
+ }
1339
+ ru.MemUse.ReadOnlyPercentage = asUint
1340
+ } else {
1341
+ ru.MemUse.ReadOnlyPercentage = DefaultMemUseReadonlyPercentage
1342
+ }
1343
+
1344
+ return ru, nil
1345
+ }
1346
+
1347
// parseClusterConfig reads the CLUSTER_* (memberlist/gossip) environment
// variables into a cluster.Config. Returns an error only when a port variable
// cannot be parsed as an integer.
func parseClusterConfig() (cluster.Config, error) {
	cfg := cluster.Config{}

	// by default memberlist assigns hostname to os.Hostname() in case hostname is empty
	// ref: https://github.com/hashicorp/memberlist/blob/3f82dc10a89f82efe300228752f7077d0d9f87e4/config.go#L303
	// it's handled at parseClusterConfig step to be consistent from the config start point and conveyed to all
	// underlying functions see parseRAFTConfig(..) for example
	cfg.Hostname = os.Getenv("CLUSTER_HOSTNAME")
	if cfg.Hostname == "" {
		cfg.Hostname, _ = os.Hostname()
	}
	cfg.Join = os.Getenv("CLUSTER_JOIN")

	// LookupEnv distinguishes "unset" from "set to empty" for the optional
	// address/port overrides below.
	advertiseAddr, advertiseAddrSet := os.LookupEnv("CLUSTER_ADVERTISE_ADDR")
	advertisePort, advertisePortSet := os.LookupEnv("CLUSTER_ADVERTISE_PORT")

	cfg.Localhost = entcfg.Enabled(os.Getenv("CLUSTER_IN_LOCALHOST"))
	gossipBind, gossipBindSet := os.LookupEnv("CLUSTER_GOSSIP_BIND_PORT")
	dataBind, dataBindSet := os.LookupEnv("CLUSTER_DATA_BIND_PORT")

	if advertiseAddrSet {
		cfg.AdvertiseAddr = advertiseAddr
	}

	if advertisePortSet {
		asInt, err := strconv.Atoi(advertisePort)
		if err != nil {
			return cfg, fmt.Errorf("parse CLUSTER_ADVERTISE_PORT as int: %w", err)
		}
		cfg.AdvertisePort = asInt
	}

	if gossipBindSet {
		asInt, err := strconv.Atoi(gossipBind)
		if err != nil {
			return cfg, fmt.Errorf("parse CLUSTER_GOSSIP_BIND_PORT as int: %w", err)
		}
		cfg.GossipBindPort = asInt
	} else {
		cfg.GossipBindPort = DefaultGossipBindPort
	}

	if dataBindSet {
		asInt, err := strconv.Atoi(dataBind)
		if err != nil {
			return cfg, fmt.Errorf("parse CLUSTER_DATA_BIND_PORT as int: %w", err)
		}
		cfg.DataBindPort = asInt
	} else {
		// it is convention in this server that the data bind port is
		// equal to the gossip bind port + 1
		cfg.DataBindPort = cfg.GossipBindPort + 1
	}

	cfg.IgnoreStartupSchemaSync = entcfg.Enabled(
		os.Getenv("CLUSTER_IGNORE_SCHEMA_SYNC"))
	cfg.SkipSchemaSyncRepair = entcfg.Enabled(
		os.Getenv("CLUSTER_SKIP_SCHEMA_REPAIR"))

	basicAuthUsername := os.Getenv("CLUSTER_BASIC_AUTH_USERNAME")
	basicAuthPassword := os.Getenv("CLUSTER_BASIC_AUTH_PASSWORD")

	cfg.AuthConfig = cluster.AuthConfig{
		BasicAuth: cluster.BasicAuth{
			Username: basicAuthUsername,
			Password: basicAuthPassword,
		},
	}

	cfg.FastFailureDetection = entcfg.Enabled(os.Getenv("FAST_FAILURE_DETECTION"))

	// MAINTENANCE_NODES is experimental and subject to removal/change. It is an optional, comma
	// separated list of hostnames that are in maintenance mode. In maintenance mode, the node will
	// return an error for all data requests, but will still participate in the raft cluster and
	// schema operations. This can be helpful if a node is too overwhelmed by startup tasks to handle
	// data requests and you need to start up the node to give it time to "catch up". Note that in
	// general one should not use the MaintenanceNodes field directly, but since we don't have
	// access to the State here and the cluster has not yet initialized, we have to set it here.

	// avoid the case where strings.Split creates a slice with only the empty string as I think
	// that will be confusing for future code. eg ([]string{""}) instead of an empty slice ([]string{}).
	// https://go.dev/play/p/3BDp1vhbkYV shows len(1) when m = "".
	cfg.MaintenanceNodes = []string{}
	if m := os.Getenv("MAINTENANCE_NODES"); m != "" {
		for _, node := range strings.Split(m, ",") {
			if node != "" {
				cfg.MaintenanceNodes = append(cfg.MaintenanceNodes, node)
			}
		}
	}

	return cfg, nil
}
1440
+
1441
+ func enabledForHost(envName string, localHostname string) bool {
1442
+ if v := os.Getenv(envName); v != "" {
1443
+ if entcfg.Enabled(v) {
1444
+ return true
1445
+ }
1446
+ return slices.Contains(strings.Split(v, ","), localHostname)
1447
+ }
1448
+ return false
1449
+ }
1450
+
1451
/*
parses variable of format "colName1:propNames1:tenantNames1;colName2:propNames2:tenantNames2"
propNames = prop1,prop2,...
tenantNames = tenant1,tenant2,...

examples:
  - collection:
    "ColName1"
    "ColName1;ColName2"
  - collection + properties:
    "ColName1:propName1"
    "ColName1:propName1,propName2;ColName2:propName3"
  - collection + properties + tenants/shards:
    "ColName1:propName1:tenantName1,tenantName2"
    "ColName1:propName1:tenantName1,tenantName2;ColName2:propName2,propName3:tenantName3"
  - collection + tenants/shards:
    "ColName1::tenantName1"
    "ColName1::tenantName1,tenantName2;ColName2::tenantName3"
*/
// collectionPropsTenantsParser validates the individual name segments of the
// format above using the schema's naming rules (see newCollectionPropsTenantsParser).
type collectionPropsTenantsParser struct {
	regexpCollection *regexp.Regexp // validates collection (class) names
	regexpProp       *regexp.Regexp // validates property names
	regexpTenant     *regexp.Regexp // validates tenant/shard names
}
1475
+
1476
// newCollectionPropsTenantsParser builds a parser whose validation regexps
// are anchored versions of the schema-defined name patterns for collections,
// properties and tenants/shards.
func newCollectionPropsTenantsParser() *collectionPropsTenantsParser {
	return &collectionPropsTenantsParser{
		regexpCollection: regexp.MustCompile(`^` + schema.ClassNameRegexCore + `$`),
		regexpProp:       regexp.MustCompile(`^` + schema.PropertyNameRegex + `$`),
		regexpTenant:     regexp.MustCompile(`^` + schema.ShardNameRegexCore + `$`),
	}
}
1483
+
1484
+ func (p *collectionPropsTenantsParser) parse(v string) ([]CollectionPropsTenants, error) {
1485
+ if v = strings.TrimSpace(v); v == "" {
1486
+ return []CollectionPropsTenants{}, nil
1487
+ }
1488
+
1489
+ split := strings.Split(v, ";")
1490
+ count := len(split)
1491
+ cpts := make([]CollectionPropsTenants, 0, count)
1492
+ uniqMapIdx := make(map[string]int, count)
1493
+
1494
+ ec := errorcompounder.New()
1495
+ for _, single := range split {
1496
+ if single = strings.TrimSpace(single); single != "" {
1497
+ if cpt, err := p.parseSingle(single); err != nil {
1498
+ ec.Add(fmt.Errorf("parse '%s': %w", single, err))
1499
+ } else {
1500
+ if prevIdx, ok := uniqMapIdx[cpt.Collection]; ok {
1501
+ cpts[prevIdx] = p.mergeCpt(cpts[prevIdx], cpt)
1502
+ } else {
1503
+ uniqMapIdx[cpt.Collection] = len(cpts)
1504
+ cpts = append(cpts, cpt)
1505
+ }
1506
+ }
1507
+ }
1508
+ }
1509
+
1510
+ return cpts, ec.ToError()
1511
+ }
1512
+
1513
+ func (p *collectionPropsTenantsParser) parseSingle(single string) (CollectionPropsTenants, error) {
1514
+ split := strings.Split(single, ":")
1515
+ empty := CollectionPropsTenants{}
1516
+
1517
+ switch count := len(split); count {
1518
+ case 1:
1519
+ collection, err := p.parseCollection(split[0])
1520
+ if err != nil {
1521
+ return empty, err
1522
+ }
1523
+ return CollectionPropsTenants{Collection: collection}, nil
1524
+
1525
+ case 2:
1526
+ collection, err := p.parseCollection(split[0])
1527
+ if err != nil {
1528
+ return empty, err
1529
+ }
1530
+ props, err := p.parseProps(split[1])
1531
+ if err != nil {
1532
+ return empty, err
1533
+ }
1534
+ return CollectionPropsTenants{Collection: collection, Props: props}, nil
1535
+
1536
+ case 3:
1537
+ collection, err := p.parseCollection(split[0])
1538
+ if err != nil {
1539
+ return empty, err
1540
+ }
1541
+ props, err := p.parseProps(split[1])
1542
+ if err != nil {
1543
+ return empty, err
1544
+ }
1545
+ tenants, err := p.parseTenants(split[2])
1546
+ if err != nil {
1547
+ return empty, err
1548
+ }
1549
+ return CollectionPropsTenants{Collection: collection, Props: props, Tenants: tenants}, nil
1550
+
1551
+ default:
1552
+ return empty, fmt.Errorf("too many parts in '%s'. Expected 1-3, got %d", single, count)
1553
+ }
1554
+ }
1555
+
1556
+ func (p *collectionPropsTenantsParser) parseCollection(collection string) (string, error) {
1557
+ collection = strings.TrimSpace(collection)
1558
+ if collection == "" {
1559
+ return "", fmt.Errorf("missing collection name")
1560
+ }
1561
+ if !p.regexpCollection.MatchString(collection) {
1562
+ return "", fmt.Errorf("invalid collection name '%s'. Does not match regexp", collection)
1563
+ }
1564
+ return collection, nil
1565
+ }
1566
+
1567
// parseProps parses a comma-separated list of property names, validating each
// against the property-name pattern.
func (p *collectionPropsTenantsParser) parseProps(propsStr string) ([]string, error) {
	return p.parseElems(propsStr, p.regexpProp, "invalid property name '%s'. Does not match regexp")
}
1570
+
1571
// parseTenants parses a comma-separated list of tenant/shard names, validating
// each against the shard-name pattern.
func (p *collectionPropsTenantsParser) parseTenants(tenantsStr string) ([]string, error) {
	return p.parseElems(tenantsStr, p.regexpTenant, "invalid tenant/shard name '%s'. Does not match regexp")
}
1574
+
1575
+ func (p *collectionPropsTenantsParser) parseElems(str string, reg *regexp.Regexp, errMsg string) ([]string, error) {
1576
+ split := strings.Split(str, ",")
1577
+ count := len(split)
1578
+ elems := make([]string, 0, count)
1579
+ uniqMap := make(map[string]struct{}, count)
1580
+
1581
+ ec := errorcompounder.New()
1582
+ for _, elem := range split {
1583
+ if elem = strings.TrimSpace(elem); elem != "" {
1584
+ if reg.MatchString(elem) {
1585
+ if _, ok := uniqMap[elem]; !ok {
1586
+ elems = append(elems, elem)
1587
+ uniqMap[elem] = struct{}{}
1588
+ }
1589
+ } else {
1590
+ ec.Add(fmt.Errorf(errMsg, elem))
1591
+ }
1592
+ }
1593
+ }
1594
+
1595
+ if len(elems) == 0 {
1596
+ return nil, ec.ToError()
1597
+ }
1598
+ return elems, ec.ToError()
1599
+ }
1600
+
1601
+ func (p *collectionPropsTenantsParser) mergeCpt(cptDst, cptSrc CollectionPropsTenants) CollectionPropsTenants {
1602
+ if cptDst.Collection != cptSrc.Collection {
1603
+ return cptDst
1604
+ }
1605
+ cptDst.Props = p.mergeUniqueElems(cptDst.Props, cptSrc.Props)
1606
+ cptDst.Tenants = p.mergeUniqueElems(cptDst.Tenants, cptSrc.Tenants)
1607
+ return cptDst
1608
+ }
1609
+
1610
+ func (p *collectionPropsTenantsParser) mergeUniqueElems(uniqueA, uniqueB []string) []string {
1611
+ lA, lB := len(uniqueA), len(uniqueB)
1612
+ if lB == 0 {
1613
+ return uniqueA
1614
+ }
1615
+ if lA == 0 {
1616
+ return uniqueB
1617
+ }
1618
+
1619
+ uniqMapA := make(map[string]struct{}, lA)
1620
+ for _, a := range uniqueA {
1621
+ uniqMapA[a] = struct{}{}
1622
+ }
1623
+ for _, b := range uniqueB {
1624
+ if _, ok := uniqMapA[b]; !ok {
1625
+ uniqueA = append(uniqueA, b)
1626
+ }
1627
+ }
1628
+ return uniqueA
1629
+ }
platform/dbops/binaries/weaviate-src/usecases/config/environment_test.go ADDED
@@ -0,0 +1,1293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "os"
17
+ "testing"
18
+
19
+ "github.com/stretchr/testify/assert"
20
+ "github.com/stretchr/testify/require"
21
+
22
+ "github.com/weaviate/weaviate/usecases/cluster"
23
+ "github.com/weaviate/weaviate/usecases/config/runtime"
24
+ )
25
+
26
// DefaultGoroutineFactor mirrors the production default for
// MAX_IMPORT_GOROUTINES_FACTOR, used by the "not given" test cases below.
const DefaultGoroutineFactor = 1.5
27
+
28
+ func TestEnvironmentImportGoroutineFactor(t *testing.T) {
29
+ factors := []struct {
30
+ name string
31
+ goroutineFactor []string
32
+ expected float64
33
+ expectedErr bool
34
+ }{
35
+ {"Valid factor", []string{"1"}, 1, false},
36
+ {"Low factor", []string{"0.5"}, 0.5, false},
37
+ {"not given", []string{}, DefaultGoroutineFactor, false},
38
+ {"High factor", []string{"5"}, 5, false},
39
+ {"invalid factor", []string{"-1"}, -1, true},
40
+ {"not parsable", []string{"I'm not a number"}, -1, true},
41
+ }
42
+ for _, tt := range factors {
43
+ t.Run(tt.name, func(t *testing.T) {
44
+ if len(tt.goroutineFactor) == 1 {
45
+ t.Setenv("MAX_IMPORT_GOROUTINES_FACTOR", tt.goroutineFactor[0])
46
+ }
47
+ conf := Config{}
48
+ err := FromEnv(&conf)
49
+
50
+ if tt.expectedErr {
51
+ require.NotNil(t, err)
52
+ } else {
53
+ require.Equal(t, tt.expected, conf.MaxImportGoroutinesFactor)
54
+ }
55
+ })
56
+ }
57
+ }
58
+
59
+ func TestEnvironmentSetFlushAfter_AllNames(t *testing.T) {
60
+ factors := []struct {
61
+ name string
62
+ flushAfter []string
63
+ expected int
64
+ expectedErr bool
65
+ }{
66
+ {"Valid", []string{"1"}, 1, false},
67
+ {"not given", []string{}, DefaultPersistenceMemtablesFlushDirtyAfter, false},
68
+ {"invalid factor", []string{"-1"}, -1, true},
69
+ {"zero factor", []string{"0"}, -1, true},
70
+ {"not parsable", []string{"I'm not a number"}, -1, true},
71
+ }
72
+ envNames := []struct {
73
+ name string
74
+ envName string
75
+ }{
76
+ {name: "fallback idle (1st)", envName: "PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER"},
77
+ {name: "fallback idle (2nd)", envName: "PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS"},
78
+ {name: "dirty", envName: "PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS"},
79
+ }
80
+
81
+ for _, n := range envNames {
82
+ t.Run(n.name, func(t *testing.T) {
83
+ for _, tt := range factors {
84
+ t.Run(tt.name, func(t *testing.T) {
85
+ if len(tt.flushAfter) == 1 {
86
+ t.Setenv(n.envName, tt.flushAfter[0])
87
+ }
88
+ conf := Config{}
89
+ err := FromEnv(&conf)
90
+
91
+ if tt.expectedErr {
92
+ require.NotNil(t, err)
93
+ } else {
94
+ require.Equal(t, tt.expected, conf.Persistence.MemtablesFlushDirtyAfter)
95
+ }
96
+ })
97
+ }
98
+ })
99
+ }
100
+ }
101
+
102
// TestEnvironmentFlushConflictingValues checks that when all three flush
// interval variable names are set at once, the newest name
// (PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS) takes precedence.
func TestEnvironmentFlushConflictingValues(t *testing.T) {
	// if all 3 variable names are used, the newest variable name
	// should be taken into consideration
	// NOTE(review): os.Clearenv wipes the entire process environment and is
	// NOT restored by t.Setenv's cleanup — confirm that later tests in this
	// package do not depend on pre-existing environment variables.
	os.Clearenv()
	t.Setenv("PERSISTENCE_FLUSH_IDLE_MEMTABLES_AFTER", "16")
	t.Setenv("PERSISTENCE_MEMTABLES_FLUSH_IDLE_AFTER_SECONDS", "17")
	t.Setenv("PERSISTENCE_MEMTABLES_FLUSH_DIRTY_AFTER_SECONDS", "18")
	conf := Config{}
	err := FromEnv(&conf)
	require.Nil(t, err)

	assert.Equal(t, 18, conf.Persistence.MemtablesFlushDirtyAfter)
}
115
+
116
+ func TestEnvironmentPersistence_dataPath(t *testing.T) {
117
+ factors := []struct {
118
+ name string
119
+ value []string
120
+ config Config
121
+ expected string
122
+ }{
123
+ {
124
+ name: "given",
125
+ value: []string{"/var/lib/weaviate"},
126
+ config: Config{},
127
+ expected: "/var/lib/weaviate",
128
+ },
129
+ {
130
+ name: "given with config set",
131
+ value: []string{"/var/lib/weaviate"},
132
+ config: Config{
133
+ Persistence: Persistence{
134
+ DataPath: "/var/data/weaviate",
135
+ },
136
+ },
137
+ expected: "/var/lib/weaviate",
138
+ },
139
+ {
140
+ name: "not given",
141
+ value: []string{},
142
+ config: Config{},
143
+ expected: DefaultPersistenceDataPath,
144
+ },
145
+ {
146
+ name: "not given with config set",
147
+ value: []string{},
148
+ config: Config{
149
+ Persistence: Persistence{
150
+ DataPath: "/var/data/weaviate",
151
+ },
152
+ },
153
+ expected: "/var/data/weaviate",
154
+ },
155
+ }
156
+ for _, tt := range factors {
157
+ t.Run(tt.name, func(t *testing.T) {
158
+ if len(tt.value) == 1 {
159
+ t.Setenv("PERSISTENCE_DATA_PATH", tt.value[0])
160
+ }
161
+ conf := tt.config
162
+ err := FromEnv(&conf)
163
+ require.Nil(t, err)
164
+ require.Equal(t, tt.expected, conf.Persistence.DataPath)
165
+ })
166
+ }
167
+ }
168
+
169
+ func TestEnvironmentMemtable_MaxSize(t *testing.T) {
170
+ factors := []struct {
171
+ name string
172
+ value []string
173
+ expected int
174
+ expectedErr bool
175
+ }{
176
+ {"Valid", []string{"100"}, 100, false},
177
+ {"not given", []string{}, DefaultPersistenceMemtablesMaxSize, false},
178
+ {"invalid factor", []string{"-1"}, -1, true},
179
+ {"zero factor", []string{"0"}, -1, true},
180
+ {"not parsable", []string{"I'm not a number"}, -1, true},
181
+ }
182
+ for _, tt := range factors {
183
+ t.Run(tt.name, func(t *testing.T) {
184
+ if len(tt.value) == 1 {
185
+ t.Setenv("PERSISTENCE_MEMTABLES_MAX_SIZE_MB", tt.value[0])
186
+ }
187
+ conf := Config{}
188
+ err := FromEnv(&conf)
189
+
190
+ if tt.expectedErr {
191
+ require.NotNil(t, err)
192
+ } else {
193
+ require.Equal(t, tt.expected, conf.Persistence.MemtablesMaxSizeMB)
194
+ }
195
+ })
196
+ }
197
+ }
198
+
199
+ func TestEnvironmentMemtable_MinDuration(t *testing.T) {
200
+ factors := []struct {
201
+ name string
202
+ value []string
203
+ expected int
204
+ expectedErr bool
205
+ }{
206
+ {"Valid", []string{"100"}, 100, false},
207
+ {"not given", []string{}, DefaultPersistenceMemtablesMinDuration, false},
208
+ {"invalid factor", []string{"-1"}, -1, true},
209
+ {"zero factor", []string{"0"}, -1, true},
210
+ {"not parsable", []string{"I'm not a number"}, -1, true},
211
+ }
212
+ for _, tt := range factors {
213
+ t.Run(tt.name, func(t *testing.T) {
214
+ if len(tt.value) == 1 {
215
+ t.Setenv("PERSISTENCE_MEMTABLES_MIN_ACTIVE_DURATION_SECONDS", tt.value[0])
216
+ }
217
+ conf := Config{}
218
+ err := FromEnv(&conf)
219
+
220
+ if tt.expectedErr {
221
+ require.NotNil(t, err)
222
+ } else {
223
+ require.Equal(t, tt.expected, conf.Persistence.MemtablesMinActiveDurationSeconds)
224
+ }
225
+ })
226
+ }
227
+ }
228
+
229
+ func TestEnvironmentMemtable_MaxDuration(t *testing.T) {
230
+ factors := []struct {
231
+ name string
232
+ value []string
233
+ expected int
234
+ expectedErr bool
235
+ }{
236
+ {"Valid", []string{"100"}, 100, false},
237
+ {"not given", []string{}, DefaultPersistenceMemtablesMaxDuration, false},
238
+ {"invalid factor", []string{"-1"}, -1, true},
239
+ {"zero factor", []string{"0"}, -1, true},
240
+ {"not parsable", []string{"I'm not a number"}, -1, true},
241
+ }
242
+ for _, tt := range factors {
243
+ t.Run(tt.name, func(t *testing.T) {
244
+ if len(tt.value) == 1 {
245
+ t.Setenv("PERSISTENCE_MEMTABLES_MAX_ACTIVE_DURATION_SECONDS", tt.value[0])
246
+ }
247
+ conf := Config{}
248
+ err := FromEnv(&conf)
249
+
250
+ if tt.expectedErr {
251
+ require.NotNil(t, err)
252
+ } else {
253
+ require.Equal(t, tt.expected, conf.Persistence.MemtablesMaxActiveDurationSeconds)
254
+ }
255
+ })
256
+ }
257
+ }
258
+
259
+ func TestEnvironmentParseClusterConfig(t *testing.T) {
260
+ hostname, _ := os.Hostname()
261
+ tests := []struct {
262
+ name string
263
+ envVars map[string]string
264
+ expectedResult cluster.Config
265
+ expectedErr error
266
+ }{
267
+ {
268
+ name: "valid cluster config - ports and advertiseaddr provided",
269
+ envVars: map[string]string{
270
+ "CLUSTER_GOSSIP_BIND_PORT": "7100",
271
+ "CLUSTER_DATA_BIND_PORT": "7101",
272
+ "CLUSTER_ADVERTISE_ADDR": "193.0.0.1",
273
+ "CLUSTER_ADVERTISE_PORT": "9999",
274
+ },
275
+ expectedResult: cluster.Config{
276
+ Hostname: hostname,
277
+ GossipBindPort: 7100,
278
+ DataBindPort: 7101,
279
+ AdvertiseAddr: "193.0.0.1",
280
+ AdvertisePort: 9999,
281
+ MaintenanceNodes: make([]string, 0),
282
+ },
283
+ },
284
+ {
285
+ name: "valid cluster config - no ports and advertiseaddr provided",
286
+ expectedResult: cluster.Config{
287
+ Hostname: hostname,
288
+ GossipBindPort: DefaultGossipBindPort,
289
+ DataBindPort: DefaultGossipBindPort + 1,
290
+ AdvertiseAddr: "",
291
+ MaintenanceNodes: make([]string, 0),
292
+ },
293
+ },
294
+ {
295
+ name: "valid cluster config - only gossip bind port provided",
296
+ envVars: map[string]string{
297
+ "CLUSTER_GOSSIP_BIND_PORT": "7777",
298
+ },
299
+ expectedResult: cluster.Config{
300
+ Hostname: hostname,
301
+ GossipBindPort: 7777,
302
+ DataBindPort: 7778,
303
+ MaintenanceNodes: make([]string, 0),
304
+ },
305
+ },
306
+ {
307
+ name: "valid cluster config - both ports provided",
308
+ envVars: map[string]string{
309
+ "CLUSTER_GOSSIP_BIND_PORT": "7100",
310
+ "CLUSTER_DATA_BIND_PORT": "7111",
311
+ },
312
+ expectedResult: cluster.Config{
313
+ Hostname: hostname,
314
+ GossipBindPort: 7100,
315
+ DataBindPort: 7111,
316
+ MaintenanceNodes: make([]string, 0),
317
+ },
318
+ },
319
+ {
320
+ name: "schema sync disabled",
321
+ envVars: map[string]string{
322
+ "CLUSTER_IGNORE_SCHEMA_SYNC": "true",
323
+ },
324
+ expectedResult: cluster.Config{
325
+ Hostname: hostname,
326
+ GossipBindPort: 7946,
327
+ DataBindPort: 7947,
328
+ IgnoreStartupSchemaSync: true,
329
+ MaintenanceNodes: make([]string, 0),
330
+ },
331
+ },
332
+ }
333
+
334
+ for _, test := range tests {
335
+ t.Run(test.name, func(t *testing.T) {
336
+ for k, v := range test.envVars {
337
+ t.Setenv(k, v)
338
+ }
339
+ cfg, err := parseClusterConfig()
340
+ if test.expectedErr != nil {
341
+ assert.EqualError(t, err, test.expectedErr.Error(),
342
+ "expected err: %v, got: %v", test.expectedErr, err)
343
+ } else {
344
+ assert.Nil(t, err, "expected nil, got: %v", err)
345
+ assert.EqualValues(t, test.expectedResult, cfg)
346
+ }
347
+ })
348
+ }
349
+ }
350
+
351
+ func TestEnvironmentSetDefaultVectorDistanceMetric(t *testing.T) {
352
+ t.Run("DefaultVectorDistanceMetricIsEmpty", func(t *testing.T) {
353
+ os.Clearenv()
354
+ conf := Config{}
355
+ FromEnv(&conf)
356
+ require.Equal(t, "", conf.DefaultVectorDistanceMetric)
357
+ })
358
+
359
+ t.Run("NonEmptyDefaultVectorDistanceMetric", func(t *testing.T) {
360
+ os.Clearenv()
361
+ t.Setenv("DEFAULT_VECTOR_DISTANCE_METRIC", "l2-squared")
362
+ conf := Config{}
363
+ FromEnv(&conf)
364
+ require.Equal(t, "l2-squared", conf.DefaultVectorDistanceMetric)
365
+ })
366
+ }
367
+
368
+ func TestEnvironmentMaxConcurrentGetRequests(t *testing.T) {
369
+ factors := []struct {
370
+ name string
371
+ value []string
372
+ expected int
373
+ expectedErr bool
374
+ }{
375
+ {"Valid", []string{"100"}, 100, false},
376
+ {"not given", []string{}, DefaultMaxConcurrentGetRequests, false},
377
+ {"unlimited", []string{"-1"}, -1, false},
378
+ {"not parsable", []string{"I'm not a number"}, -1, true},
379
+ }
380
+ for _, tt := range factors {
381
+ t.Run(tt.name, func(t *testing.T) {
382
+ if len(tt.value) == 1 {
383
+ t.Setenv("MAXIMUM_CONCURRENT_GET_REQUESTS", tt.value[0])
384
+ }
385
+ conf := Config{}
386
+ err := FromEnv(&conf)
387
+
388
+ if tt.expectedErr {
389
+ require.NotNil(t, err)
390
+ } else {
391
+ require.Equal(t, tt.expected, conf.MaximumConcurrentGetRequests)
392
+ }
393
+ })
394
+ }
395
+ }
396
+
397
+ func TestEnvironmentCORS_Origin(t *testing.T) {
398
+ factors := []struct {
399
+ name string
400
+ value []string
401
+ expected string
402
+ expectedErr bool
403
+ }{
404
+ {"Valid", []string{"http://foo.com"}, "http://foo.com", false},
405
+ {"not given", []string{}, DefaultCORSAllowOrigin, false},
406
+ }
407
+ for _, tt := range factors {
408
+ t.Run(tt.name, func(t *testing.T) {
409
+ os.Clearenv()
410
+ if len(tt.value) == 1 {
411
+ os.Setenv("CORS_ALLOW_ORIGIN", tt.value[0])
412
+ }
413
+ conf := Config{}
414
+ err := FromEnv(&conf)
415
+
416
+ if tt.expectedErr {
417
+ require.NotNil(t, err)
418
+ } else {
419
+ require.Equal(t, tt.expected, conf.CORS.AllowOrigin)
420
+ }
421
+ })
422
+ }
423
+ }
424
+
425
+ func TestEnvironmentGRPCPort(t *testing.T) {
426
+ factors := []struct {
427
+ name string
428
+ value []string
429
+ expected int
430
+ expectedErr bool
431
+ }{
432
+ {"Valid", []string{"50052"}, 50052, false},
433
+ {"not given", []string{}, DefaultGRPCPort, false},
434
+ {"invalid factor", []string{"-1"}, -1, true},
435
+ {"zero factor", []string{"0"}, -1, true},
436
+ {"not parsable", []string{"I'm not a number"}, -1, true},
437
+ }
438
+ for _, tt := range factors {
439
+ t.Run(tt.name, func(t *testing.T) {
440
+ if len(tt.value) == 1 {
441
+ t.Setenv("GRPC_PORT", tt.value[0])
442
+ }
443
+ conf := Config{}
444
+ err := FromEnv(&conf)
445
+
446
+ if tt.expectedErr {
447
+ require.NotNil(t, err)
448
+ } else {
449
+ require.Equal(t, tt.expected, conf.GRPC.Port)
450
+ }
451
+ })
452
+ }
453
+ }
454
+
455
+ func TestEnvironmentCORS_Methods(t *testing.T) {
456
+ factors := []struct {
457
+ name string
458
+ value []string
459
+ expected string
460
+ expectedErr bool
461
+ }{
462
+ {"Valid", []string{"POST"}, "POST", false},
463
+ {"not given", []string{}, DefaultCORSAllowMethods, false},
464
+ }
465
+ for _, tt := range factors {
466
+ t.Run(tt.name, func(t *testing.T) {
467
+ os.Clearenv()
468
+ if len(tt.value) == 1 {
469
+ os.Setenv("CORS_ALLOW_METHODS", tt.value[0])
470
+ }
471
+ conf := Config{}
472
+ err := FromEnv(&conf)
473
+
474
+ if tt.expectedErr {
475
+ require.NotNil(t, err)
476
+ } else {
477
+ require.Equal(t, tt.expected, conf.CORS.AllowMethods)
478
+ }
479
+ })
480
+ }
481
+ }
482
+
483
+ func TestEnvironmentDisableGraphQL(t *testing.T) {
484
+ factors := []struct {
485
+ name string
486
+ value []string
487
+ expected bool
488
+ expectedErr bool
489
+ }{
490
+ {"Valid: true", []string{"true"}, true, false},
491
+ {"Valid: false", []string{"false"}, false, false},
492
+ {"Valid: 1", []string{"1"}, true, false},
493
+ {"Valid: 0", []string{"0"}, false, false},
494
+ {"Valid: on", []string{"on"}, true, false},
495
+ {"Valid: off", []string{"off"}, false, false},
496
+ {"not given", []string{}, false, false},
497
+ }
498
+ for _, tt := range factors {
499
+ t.Run(tt.name, func(t *testing.T) {
500
+ if len(tt.value) == 1 {
501
+ t.Setenv("DISABLE_GRAPHQL", tt.value[0])
502
+ }
503
+ conf := Config{}
504
+ err := FromEnv(&conf)
505
+
506
+ if tt.expectedErr {
507
+ require.NotNil(t, err)
508
+ } else {
509
+ require.Equal(t, tt.expected, conf.DisableGraphQL)
510
+ }
511
+ })
512
+ }
513
+ }
514
+
515
+ func TestEnvironmentCORS_Headers(t *testing.T) {
516
+ factors := []struct {
517
+ name string
518
+ value []string
519
+ expected string
520
+ expectedErr bool
521
+ }{
522
+ {"Valid", []string{"Authorization"}, "Authorization", false},
523
+ {"not given", []string{}, DefaultCORSAllowHeaders, false},
524
+ }
525
+ for _, tt := range factors {
526
+ t.Run(tt.name, func(t *testing.T) {
527
+ os.Clearenv()
528
+ if len(tt.value) == 1 {
529
+ os.Setenv("CORS_ALLOW_HEADERS", tt.value[0])
530
+ }
531
+ conf := Config{}
532
+ err := FromEnv(&conf)
533
+
534
+ if tt.expectedErr {
535
+ require.NotNil(t, err)
536
+ } else {
537
+ require.Equal(t, tt.expected, conf.CORS.AllowHeaders)
538
+ }
539
+ })
540
+ }
541
+ }
542
+
543
+ func TestEnvironmentPrometheusGroupClasses_OldName(t *testing.T) {
544
+ factors := []struct {
545
+ name string
546
+ value []string
547
+ expected bool
548
+ expectedErr bool
549
+ }{
550
+ {"Valid: true", []string{"true"}, true, false},
551
+ {"Valid: false", []string{"false"}, false, false},
552
+ {"Valid: 1", []string{"1"}, true, false},
553
+ {"Valid: 0", []string{"0"}, false, false},
554
+ {"Valid: on", []string{"on"}, true, false},
555
+ {"Valid: off", []string{"off"}, false, false},
556
+ {"not given", []string{}, false, false},
557
+ }
558
+ for _, tt := range factors {
559
+ t.Run(tt.name, func(t *testing.T) {
560
+ t.Setenv("PROMETHEUS_MONITORING_ENABLED", "true")
561
+ if len(tt.value) == 1 {
562
+ t.Setenv("PROMETHEUS_MONITORING_GROUP_CLASSES", tt.value[0])
563
+ }
564
+ conf := Config{}
565
+ err := FromEnv(&conf)
566
+
567
+ if tt.expectedErr {
568
+ require.NotNil(t, err)
569
+ } else {
570
+ require.Equal(t, tt.expected, conf.Monitoring.Group)
571
+ }
572
+ })
573
+ }
574
+ }
575
+
576
+ func TestEnvironmentPrometheusGroupClasses_NewName(t *testing.T) {
577
+ factors := []struct {
578
+ name string
579
+ value []string
580
+ expected bool
581
+ expectedErr bool
582
+ }{
583
+ {"Valid: true", []string{"true"}, true, false},
584
+ {"Valid: false", []string{"false"}, false, false},
585
+ {"Valid: 1", []string{"1"}, true, false},
586
+ {"Valid: 0", []string{"0"}, false, false},
587
+ {"Valid: on", []string{"on"}, true, false},
588
+ {"Valid: off", []string{"off"}, false, false},
589
+ {"not given", []string{}, false, false},
590
+ }
591
+ for _, tt := range factors {
592
+ t.Run(tt.name, func(t *testing.T) {
593
+ t.Setenv("PROMETHEUS_MONITORING_ENABLED", "true")
594
+ if len(tt.value) == 1 {
595
+ t.Setenv("PROMETHEUS_MONITORING_GROUP", tt.value[0])
596
+ }
597
+ conf := Config{}
598
+ err := FromEnv(&conf)
599
+
600
+ if tt.expectedErr {
601
+ require.NotNil(t, err)
602
+ } else {
603
+ require.Equal(t, tt.expected, conf.Monitoring.Group)
604
+ }
605
+ })
606
+ }
607
+ }
608
+
609
+ func TestEnvironmentMinimumReplicationFactor(t *testing.T) {
610
+ factors := []struct {
611
+ name string
612
+ value []string
613
+ expected int
614
+ expectedErr bool
615
+ }{
616
+ {"Valid", []string{"3"}, 3, false},
617
+ {"not given", []string{}, DefaultMinimumReplicationFactor, false},
618
+ {"invalid factor", []string{"-1"}, -1, true},
619
+ {"zero factor", []string{"0"}, -1, true},
620
+ {"not parsable", []string{"I'm not a number"}, -1, true},
621
+ }
622
+ for _, tt := range factors {
623
+ t.Run(tt.name, func(t *testing.T) {
624
+ if len(tt.value) == 1 {
625
+ t.Setenv("REPLICATION_MINIMUM_FACTOR", tt.value[0])
626
+ }
627
+ conf := Config{}
628
+ err := FromEnv(&conf)
629
+
630
+ if tt.expectedErr {
631
+ require.NotNil(t, err)
632
+ } else {
633
+ require.Equal(t, tt.expected, conf.Replication.MinimumFactor)
634
+ }
635
+ })
636
+ }
637
+ }
638
+
639
+ func TestEnvironmentQueryDefaults_Limit(t *testing.T) {
640
+ factors := []struct {
641
+ name string
642
+ value []string
643
+ config Config
644
+ expected int64
645
+ }{
646
+ {
647
+ name: "Valid",
648
+ value: []string{"3"},
649
+ config: Config{},
650
+ expected: 3,
651
+ },
652
+ {
653
+ name: "Valid with config already set",
654
+ value: []string{"3"},
655
+ config: Config{
656
+ QueryDefaults: QueryDefaults{
657
+ Limit: 20,
658
+ },
659
+ },
660
+ expected: 3,
661
+ },
662
+ {
663
+ name: "not given with config set",
664
+ value: []string{},
665
+ config: Config{
666
+ QueryDefaults: QueryDefaults{
667
+ Limit: 20,
668
+ },
669
+ },
670
+ expected: 20,
671
+ },
672
+ {
673
+ name: "not given with config set",
674
+ value: []string{},
675
+ config: Config{},
676
+ expected: DefaultQueryDefaultsLimit,
677
+ },
678
+ }
679
+ for _, tt := range factors {
680
+ t.Run(tt.name, func(t *testing.T) {
681
+ if len(tt.value) == 1 {
682
+ t.Setenv("QUERY_DEFAULTS_LIMIT", tt.value[0])
683
+ }
684
+ conf := tt.config
685
+ err := FromEnv(&conf)
686
+
687
+ require.Nil(t, err)
688
+ require.Equal(t, tt.expected, conf.QueryDefaults.Limit)
689
+ })
690
+ }
691
+ }
692
+
693
+ func TestEnvironmentAuthentication(t *testing.T) {
694
+ factors := []struct {
695
+ name string
696
+ auth_env_var []string
697
+ expected Authentication
698
+ }{
699
+ {
700
+ name: "Valid API Key",
701
+ auth_env_var: []string{"AUTHENTICATION_APIKEY_ENABLED"},
702
+ expected: Authentication{
703
+ APIKey: StaticAPIKey{
704
+ Enabled: true,
705
+ },
706
+ },
707
+ },
708
+ {
709
+ name: "Valid Anonymous Access",
710
+ auth_env_var: []string{"AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED"},
711
+ expected: Authentication{
712
+ AnonymousAccess: AnonymousAccess{
713
+ Enabled: true,
714
+ },
715
+ },
716
+ },
717
+ {
718
+ name: "Valid OIDC Auth",
719
+ auth_env_var: []string{"AUTHENTICATION_OIDC_ENABLED"},
720
+ expected: Authentication{
721
+ OIDC: OIDC{
722
+ Enabled: true,
723
+ Issuer: runtime.NewDynamicValue(""),
724
+ ClientID: runtime.NewDynamicValue(""),
725
+ SkipClientIDCheck: runtime.NewDynamicValue(false),
726
+ UsernameClaim: runtime.NewDynamicValue(""),
727
+ GroupsClaim: runtime.NewDynamicValue(""),
728
+ Scopes: runtime.NewDynamicValue([]string(nil)),
729
+ Certificate: runtime.NewDynamicValue(""),
730
+ JWKSUrl: runtime.NewDynamicValue(""),
731
+ },
732
+ },
733
+ },
734
+ {
735
+ name: "Enabled db user",
736
+ auth_env_var: []string{"AUTHENTICATION_DB_USERS_ENABLED"},
737
+ expected: Authentication{
738
+ DBUsers: DbUsers{Enabled: true},
739
+ },
740
+ },
741
+ {
742
+ name: "not given",
743
+ auth_env_var: []string{},
744
+ expected: Authentication{
745
+ AnonymousAccess: AnonymousAccess{
746
+ Enabled: true,
747
+ },
748
+ },
749
+ },
750
+ }
751
+ for _, tt := range factors {
752
+ t.Run(tt.name, func(t *testing.T) {
753
+ if len(tt.auth_env_var) == 1 {
754
+ t.Setenv(tt.auth_env_var[0], "true")
755
+ }
756
+ conf := Config{}
757
+ err := FromEnv(&conf)
758
+ require.Nil(t, err)
759
+ require.Equal(t, tt.expected, conf.Authentication)
760
+ })
761
+ }
762
+ }
763
+
764
+ func TestEnvironmentHNSWMaxLogSize(t *testing.T) {
765
+ factors := []struct {
766
+ name string
767
+ value []string
768
+ expected int64
769
+ expectedErr bool
770
+ }{
771
+ {"Valid no unit", []string{"3"}, 3, false},
772
+ {"Valid IEC unit", []string{"3KB"}, 3000, false},
773
+ {"Valid SI unit", []string{"3KiB"}, 3 * 1024, false},
774
+ {"not given", []string{}, DefaultPersistenceHNSWMaxLogSize, false},
775
+ {"invalid factor", []string{"-1"}, -1, true},
776
+ {"not parsable", []string{"I'm not a number"}, -1, true},
777
+ }
778
+ for _, tt := range factors {
779
+ t.Run(tt.name, func(t *testing.T) {
780
+ if len(tt.value) == 1 {
781
+ t.Setenv("PERSISTENCE_HNSW_MAX_LOG_SIZE", tt.value[0])
782
+ }
783
+ conf := Config{}
784
+ err := FromEnv(&conf)
785
+
786
+ if tt.expectedErr {
787
+ require.NotNil(t, err)
788
+ } else {
789
+ require.Equal(t, tt.expected, conf.Persistence.HNSWMaxLogSize)
790
+ }
791
+ })
792
+ }
793
+ }
794
+
795
+ func TestEnvironmentHNSWWaitForPrefill(t *testing.T) {
796
+ factors := []struct {
797
+ name string
798
+ value []string
799
+ expected bool
800
+ expectedErr bool
801
+ }{
802
+ {"Valid: true", []string{"true"}, true, false},
803
+ {"Valid: false", []string{"false"}, false, false},
804
+ {"Valid: 1", []string{"1"}, true, false},
805
+ {"Valid: 0", []string{"0"}, false, false},
806
+ {"Valid: on", []string{"on"}, true, false},
807
+ {"Valid: off", []string{"off"}, false, false},
808
+ {"not given", []string{}, false, false},
809
+ }
810
+ for _, tt := range factors {
811
+ t.Run(tt.name, func(t *testing.T) {
812
+ if len(tt.value) == 1 {
813
+ t.Setenv("HNSW_STARTUP_WAIT_FOR_VECTOR_CACHE", tt.value[0])
814
+ }
815
+ conf := Config{}
816
+ err := FromEnv(&conf)
817
+
818
+ if tt.expectedErr {
819
+ require.NotNil(t, err)
820
+ } else {
821
+ require.Equal(t, tt.expected, conf.HNSWStartupWaitForVectorCache)
822
+ }
823
+ })
824
+ }
825
+ }
826
+
827
+ func TestEnvironmentHNSWVisitedListPoolMaxSize(t *testing.T) {
828
+ factors := []struct {
829
+ name string
830
+ value []string
831
+ expected int
832
+ expectedErr bool
833
+ }{
834
+ {"Valid", []string{"3"}, 3, false},
835
+ {"not given", []string{}, DefaultHNSWVisitedListPoolSize, false},
836
+ {"valid negative", []string{"-1"}, -1, false},
837
+ {"not parsable", []string{"I'm not a number"}, -1, true},
838
+ }
839
+ for _, tt := range factors {
840
+ t.Run(tt.name, func(t *testing.T) {
841
+ if len(tt.value) == 1 {
842
+ t.Setenv("HNSW_VISITED_LIST_POOL_MAX_SIZE", tt.value[0])
843
+ }
844
+ conf := Config{}
845
+ err := FromEnv(&conf)
846
+
847
+ if tt.expectedErr {
848
+ require.NotNil(t, err)
849
+ } else {
850
+ require.Equal(t, tt.expected, conf.HNSWVisitedListPoolMaxSize)
851
+ }
852
+ })
853
+ }
854
+ }
855
+
856
+ func TestEnvironmentHNSWFlatSearchConcurrency(t *testing.T) {
857
+ factors := []struct {
858
+ name string
859
+ value []string
860
+ expected int
861
+ expectedErr bool
862
+ }{
863
+ {"Valid", []string{"3"}, 3, false},
864
+ {"not given", []string{}, DefaultHNSWFlatSearchConcurrency, false},
865
+ {"valid negative", []string{"-1"}, -1, true},
866
+ {"not parsable", []string{"I'm not a number"}, -1, true},
867
+ }
868
+ for _, tt := range factors {
869
+ t.Run(tt.name, func(t *testing.T) {
870
+ if len(tt.value) == 1 {
871
+ t.Setenv("HNSW_FLAT_SEARCH_CONCURRENCY", tt.value[0])
872
+ }
873
+ conf := Config{}
874
+ err := FromEnv(&conf)
875
+
876
+ if tt.expectedErr {
877
+ require.NotNil(t, err)
878
+ } else {
879
+ require.Equal(t, tt.expected, conf.HNSWFlatSearchConcurrency)
880
+ }
881
+ })
882
+ }
883
+ }
884
+
885
+ func TestEnvironmentHNSWAcornFilterRatio(t *testing.T) {
886
+ factors := []struct {
887
+ name string
888
+ value []string
889
+ expected float64
890
+ expectedErr bool
891
+ }{
892
+ {"Valid", []string{"0.5"}, 0.5, false},
893
+ {"not given", []string{}, 0.4, false},
894
+ {"max", []string{"0.0"}, 0.0, false},
895
+ {"min", []string{"1.0"}, 1.0, false},
896
+ {"negative", []string{"-1.2"}, -1.0, true},
897
+ {"too large", []string{"1.2"}, -1.0, true},
898
+ }
899
+ for _, tt := range factors {
900
+ t.Run(tt.name, func(t *testing.T) {
901
+ if len(tt.value) == 1 {
902
+ t.Setenv("HNSW_ACORN_FILTER_RATIO", tt.value[0])
903
+ }
904
+ conf := Config{}
905
+ err := FromEnv(&conf)
906
+
907
+ if tt.expectedErr {
908
+ require.NotNil(t, err)
909
+ } else {
910
+ require.Equal(t, tt.expected, conf.HNSWAcornFilterRatio)
911
+ }
912
+ })
913
+ }
914
+ }
915
+
916
+ func TestEnabledForHost(t *testing.T) {
917
+ localHostname := "weaviate-1"
918
+ envName := "HOSTBASED_SETTING"
919
+
920
+ enabledVals := []string{"enabled", "1", "true", "on", "weaviate-1", "weaviate-0,weaviate-1,weaviate-2"}
921
+ for _, val := range enabledVals {
922
+ t.Run(fmt.Sprintf("enabled %q", val), func(t *testing.T) {
923
+ t.Setenv(envName, val)
924
+ assert.True(t, enabledForHost(envName, localHostname))
925
+ })
926
+ }
927
+
928
+ disabledVals := []string{"disabled", "0", "false", "off", "weaviate-0", "weaviate-0,weaviate-2,weaviate-3", ""}
929
+ for _, val := range disabledVals {
930
+ t.Run(fmt.Sprintf("disabled %q", val), func(t *testing.T) {
931
+ t.Setenv(envName, val)
932
+ assert.False(t, enabledForHost(envName, localHostname))
933
+ })
934
+ }
935
+ }
936
+
937
+ func TestParseCollectionPropsTenants(t *testing.T) {
938
+ type testCase struct {
939
+ env string
940
+ expected []CollectionPropsTenants
941
+ expectedErrMsg string
942
+ }
943
+
944
+ p := newCollectionPropsTenantsParser()
945
+
946
+ testCases := []testCase{
947
+ {
948
+ env: "",
949
+ expected: []CollectionPropsTenants{},
950
+ },
951
+
952
+ // collections
953
+ {
954
+ env: "Collection1",
955
+ expected: []CollectionPropsTenants{
956
+ {Collection: "Collection1"},
957
+ },
958
+ },
959
+ {
960
+ env: "Collection1; Collection2; ;",
961
+ expected: []CollectionPropsTenants{
962
+ {Collection: "Collection1"},
963
+ {Collection: "Collection2"},
964
+ },
965
+ },
966
+ {
967
+ env: "Collection1:; Collection2::; ;",
968
+ expected: []CollectionPropsTenants{
969
+ {Collection: "Collection1"},
970
+ {Collection: "Collection2"},
971
+ },
972
+ },
973
+
974
+ // collections + props
975
+ {
976
+ env: "Collection1:prop1,prop2",
977
+ expected: []CollectionPropsTenants{
978
+ {
979
+ Collection: "Collection1",
980
+ Props: []string{"prop1", "prop2"},
981
+ },
982
+ },
983
+ },
984
+ {
985
+ env: "Collection1:prop1, prop2;Collection2:prop3: ;",
986
+ expected: []CollectionPropsTenants{
987
+ {
988
+ Collection: "Collection1",
989
+ Props: []string{"prop1", "prop2"},
990
+ },
991
+ {
992
+ Collection: "Collection2",
993
+ Props: []string{"prop3"},
994
+ },
995
+ },
996
+ },
997
+
998
+ // collections + tenants
999
+ {
1000
+ env: "Collection1::tenant1,tenant2",
1001
+ expected: []CollectionPropsTenants{
1002
+ {
1003
+ Collection: "Collection1",
1004
+ Tenants: []string{"tenant1", "tenant2"},
1005
+ },
1006
+ },
1007
+ },
1008
+ {
1009
+ env: "Collection1::tenant1, tenant2;Collection2::tenant3",
1010
+ expected: []CollectionPropsTenants{
1011
+ {
1012
+ Collection: "Collection1",
1013
+ Tenants: []string{"tenant1", "tenant2"},
1014
+ },
1015
+ {
1016
+ Collection: "Collection2",
1017
+ Tenants: []string{"tenant3"},
1018
+ },
1019
+ },
1020
+ },
1021
+
1022
+ // collections + props + tenants
1023
+ {
1024
+ env: "Collection1:prop1:tenant1,tenant2",
1025
+ expected: []CollectionPropsTenants{
1026
+ {
1027
+ Collection: "Collection1",
1028
+ Props: []string{"prop1"},
1029
+ Tenants: []string{"tenant1", "tenant2"},
1030
+ },
1031
+ },
1032
+ },
1033
+ {
1034
+ env: "Collection1:prop1 :tenant1, tenant2;Collection2:prop2,prop3 :tenant3 ; ",
1035
+ expected: []CollectionPropsTenants{
1036
+ {
1037
+ Collection: "Collection1",
1038
+ Props: []string{"prop1"},
1039
+ Tenants: []string{"tenant1", "tenant2"},
1040
+ },
1041
+ {
1042
+ Collection: "Collection2",
1043
+ Props: []string{"prop2", "prop3"},
1044
+ Tenants: []string{"tenant3"},
1045
+ },
1046
+ },
1047
+ },
1048
+
1049
+ // unique / merged
1050
+ {
1051
+ env: "Collection1:prop1,prop2:tenant1,tenant2;Collection2:propX;Collection1:prop2,prop3;Collection3::tenantY;Collection1:prop4:tenant2,tenant3",
1052
+ expected: []CollectionPropsTenants{
1053
+ {
1054
+ Collection: "Collection1",
1055
+ Props: []string{"prop1", "prop2", "prop3", "prop4"},
1056
+ Tenants: []string{"tenant1", "tenant2", "tenant3"},
1057
+ },
1058
+ {
1059
+ Collection: "Collection2",
1060
+ Props: []string{"propX"},
1061
+ },
1062
+ {
1063
+ Collection: "Collection3",
1064
+ Tenants: []string{"tenantY"},
1065
+ },
1066
+ },
1067
+ },
1068
+
1069
+ // errors
1070
+ {
1071
+ env: "lowerCaseCollectionName",
1072
+ expectedErrMsg: "invalid collection name",
1073
+ },
1074
+ {
1075
+ env: "InvalidChars#",
1076
+ expectedErrMsg: "invalid collection name",
1077
+ },
1078
+ {
1079
+ env: "Collection1:InvalidChars#",
1080
+ expectedErrMsg: "invalid property name",
1081
+ },
1082
+ {
1083
+ env: "Collection1::InvalidChars#",
1084
+ expectedErrMsg: "invalid tenant/shard name",
1085
+ },
1086
+ {
1087
+ env: ":prop",
1088
+ expectedErrMsg: "missing collection name",
1089
+ },
1090
+ {
1091
+ env: "::tenant",
1092
+ expectedErrMsg: "missing collection name",
1093
+ },
1094
+ {
1095
+ env: ":prop:tenant",
1096
+ expectedErrMsg: "missing collection name",
1097
+ },
1098
+ {
1099
+ env: "Collection1:::",
1100
+ expectedErrMsg: "too many parts",
1101
+ },
1102
+ {
1103
+ env: "Collection1:prop:tenant:",
1104
+ expectedErrMsg: "too many parts",
1105
+ },
1106
+ {
1107
+ env: "Collection1:prop:tenant:something",
1108
+ expectedErrMsg: "too many parts",
1109
+ },
1110
+ }
1111
+
1112
+ for _, tc := range testCases {
1113
+ t.Run(tc.env, func(t *testing.T) {
1114
+ cpts, err := p.parse(tc.env)
1115
+
1116
+ if tc.expectedErrMsg != "" {
1117
+ assert.ErrorContains(t, err, tc.expectedErrMsg)
1118
+ } else {
1119
+ assert.NoError(t, err)
1120
+ }
1121
+
1122
+ assert.ElementsMatch(t, tc.expected, cpts)
1123
+ })
1124
+ }
1125
+ }
1126
+
1127
+ func TestEnvironmentPersistenceMinMMapSize(t *testing.T) {
1128
+ factors := []struct {
1129
+ name string
1130
+ value []string
1131
+ expected int64
1132
+ expectedErr bool
1133
+ }{
1134
+ {"Valid no unit", []string{"3"}, 3, false},
1135
+ {"Valid IEC unit", []string{"3KB"}, 3000, false},
1136
+ {"Valid SI unit", []string{"3KiB"}, 3 * 1024, false},
1137
+ {"not given", []string{}, DefaultPersistenceMinMMapSize, false},
1138
+ {"invalid factor", []string{"-1"}, -1, true},
1139
+ {"not parsable", []string{"I'm not a number"}, -1, true},
1140
+ }
1141
+ for _, tt := range factors {
1142
+ t.Run(tt.name, func(t *testing.T) {
1143
+ if len(tt.value) == 1 {
1144
+ t.Setenv("PERSISTENCE_MIN_MMAP_SIZE", tt.value[0])
1145
+ }
1146
+ conf := Config{}
1147
+ err := FromEnv(&conf)
1148
+
1149
+ if tt.expectedErr {
1150
+ require.NotNil(t, err)
1151
+ } else {
1152
+ require.Equal(t, tt.expected, conf.Persistence.MinMMapSize)
1153
+ }
1154
+ })
1155
+ }
1156
+ }
1157
+
1158
+ func TestEnvironmentPersistenceMaxReuseWalSize(t *testing.T) {
1159
+ factors := []struct {
1160
+ name string
1161
+ value []string
1162
+ expected int64
1163
+ expectedErr bool
1164
+ }{
1165
+ {"Valid no unit", []string{"3"}, 3, false},
1166
+ {"Valid IEC unit", []string{"3KB"}, 3000, false},
1167
+ {"Valid SI unit", []string{"3KiB"}, 3 * 1024, false},
1168
+ {"not given", []string{}, DefaultPersistenceMaxReuseWalSize, false},
1169
+ {"invalid factor", []string{"-1"}, -1, true},
1170
+ {"not parsable", []string{"I'm not a number"}, -1, true},
1171
+ }
1172
+ for _, tt := range factors {
1173
+ t.Run(tt.name, func(t *testing.T) {
1174
+ if len(tt.value) == 1 {
1175
+ t.Setenv("PERSISTENCE_MAX_REUSE_WAL_SIZE", tt.value[0])
1176
+ }
1177
+ conf := Config{}
1178
+ err := FromEnv(&conf)
1179
+
1180
+ if tt.expectedErr {
1181
+ require.NotNil(t, err)
1182
+ } else {
1183
+ require.Equal(t, tt.expected, conf.Persistence.MaxReuseWalSize)
1184
+ }
1185
+ })
1186
+ }
1187
+ }
1188
+
1189
+ func TestParsePositiveFloat(t *testing.T) {
1190
+ tests := []struct {
1191
+ name string
1192
+ envName string
1193
+ envValue string
1194
+ defaultValue float64
1195
+ expected float64
1196
+ expectError bool
1197
+ }{
1198
+ {
1199
+ name: "valid positive float",
1200
+ envName: "TEST_POSITIVE_FLOAT",
1201
+ envValue: "1.5",
1202
+ defaultValue: 2.0,
1203
+ expected: 1.5,
1204
+ expectError: false,
1205
+ },
1206
+ {
1207
+ name: "valid integer as float",
1208
+ envName: "TEST_POSITIVE_FLOAT",
1209
+ envValue: "2",
1210
+ defaultValue: 1.0,
1211
+ expected: 2.0,
1212
+ expectError: false,
1213
+ },
1214
+ {
1215
+ name: "use default when env not set",
1216
+ envName: "TEST_POSITIVE_FLOAT",
1217
+ envValue: "",
1218
+ defaultValue: 3.0,
1219
+ expected: 3.0,
1220
+ expectError: false,
1221
+ },
1222
+ {
1223
+ name: "zero value should error",
1224
+ envName: "TEST_POSITIVE_FLOAT",
1225
+ envValue: "0",
1226
+ defaultValue: 1.0,
1227
+ expected: 0,
1228
+ expectError: true,
1229
+ },
1230
+ {
1231
+ name: "negative value should error",
1232
+ envName: "TEST_POSITIVE_FLOAT",
1233
+ envValue: "-1.5",
1234
+ defaultValue: 1.0,
1235
+ expected: 0,
1236
+ expectError: true,
1237
+ },
1238
+ {
1239
+ name: "invalid float should error",
1240
+ envName: "TEST_POSITIVE_FLOAT",
1241
+ envValue: "not-a-float",
1242
+ defaultValue: 1.0,
1243
+ expected: 0,
1244
+ expectError: true,
1245
+ },
1246
+ {
1247
+ name: "very small positive float",
1248
+ envName: "TEST_POSITIVE_FLOAT",
1249
+ envValue: "0.0000001",
1250
+ defaultValue: 1.0,
1251
+ expected: 0.0000001,
1252
+ expectError: false,
1253
+ },
1254
+ {
1255
+ name: "very large positive float",
1256
+ envName: "TEST_POSITIVE_FLOAT",
1257
+ envValue: "999999.999999",
1258
+ defaultValue: 1.0,
1259
+ expected: 999999.999999,
1260
+ expectError: false,
1261
+ },
1262
+ }
1263
+
1264
+ for _, tt := range tests {
1265
+ t.Run(tt.name, func(t *testing.T) {
1266
+ // Set up environment
1267
+ if tt.envValue != "" {
1268
+ t.Setenv(tt.envName, tt.envValue)
1269
+ } else {
1270
+ os.Unsetenv(tt.envName)
1271
+ }
1272
+
1273
+ // Create a variable to store the result
1274
+ var result float64
1275
+
1276
+ // Call the function
1277
+ err := parsePositiveFloat(tt.envName, func(val float64) {
1278
+ result = val
1279
+ }, tt.defaultValue)
1280
+
1281
+ // Check error
1282
+ if tt.expectError {
1283
+ assert.Error(t, err)
1284
+ if tt.envValue != "" {
1285
+ assert.Contains(t, err.Error(), tt.envName)
1286
+ }
1287
+ } else {
1288
+ assert.NoError(t, err)
1289
+ assert.Equal(t, tt.expected, result)
1290
+ }
1291
+ })
1292
+ }
1293
+ }
platform/dbops/binaries/weaviate-src/usecases/config/helpers_for_test.go ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import "github.com/pkg/errors"
15
+
16
+ type fakeModuleProvider struct {
17
+ valid []string
18
+ }
19
+
20
+ func (f *fakeModuleProvider) ValidateVectorizer(moduleName string) error {
21
+ for _, valid := range f.valid {
22
+ if moduleName == valid {
23
+ return nil
24
+ }
25
+ }
26
+
27
+ return errors.Errorf("invalid vectorizer %q", moduleName)
28
+ }
platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings.go ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "fmt"
16
+ "math"
17
+ "strconv"
18
+ "strings"
19
+ )
20
+
21
// parseResourceString takes a human-readable size such as "1024", "1KiB" or
// "43TiB" and converts it to an integer number of bytes. Binary units (KiB,
// MiB, GiB, TiB), decimal units (KB, MB, GB, TB) and a bare byte count are
// supported; the special values "unlimited" / "nolimit" (case-insensitive)
// map to math.MaxInt64. Whitespace between number and unit is tolerated.
func parseResourceString(resource string) (int64, error) {
	resource = strings.TrimSpace(resource)

	if strings.EqualFold(resource, "unlimited") || strings.EqualFold(resource, "nolimit") {
		return math.MaxInt64, nil
	}

	// Find where the leading run of digits ends; everything after is the unit.
	lastDigit := len(resource)
	for i, r := range resource {
		if r < '0' || r > '9' {
			lastDigit = i
			break
		}
	}

	// Split the numeric part and the unit.
	number, unit := resource[:lastDigit], resource[lastDigit:]
	unit = strings.TrimSpace(unit) // tolerate "1024 KiB" style input
	value, err := strconv.ParseInt(number, 10, 64)
	if err != nil {
		return 0, err
	}

	unitMultipliers := map[string]int64{
		"":    1, // No unit means bytes
		"B":   1,
		"KiB": 1024,
		"MiB": 1024 * 1024,
		"GiB": 1024 * 1024 * 1024,
		"TiB": 1024 * 1024 * 1024 * 1024,
		"KB":  1000,
		"MB":  1000 * 1000,
		"GB":  1000 * 1000 * 1000,
		"TB":  1000 * 1000 * 1000 * 1000,
	}
	multiplier, exists := unitMultipliers[unit]
	if !exists {
		return 0, fmt.Errorf("invalid or unsupported unit")
	}

	// Guard against int64 overflow: e.g. "99999999999TiB" would otherwise
	// silently wrap around to a negative byte count.
	if value != 0 && multiplier > math.MaxInt64/value {
		return 0, fmt.Errorf("resource size overflows int64")
	}

	return value * multiplier, nil
}
platform/dbops/binaries/weaviate-src/usecases/config/parse_resource_strings_test.go ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "math"
16
+ "testing"
17
+ )
18
+
19
+ func TestParseResourceString(t *testing.T) {
20
+ tests := []struct {
21
+ name string
22
+ input string
23
+ expected int64
24
+ err bool
25
+ }{
26
+ {"ValidBytes", "1024", 1024, false},
27
+ {"ValidKiB", "1KiB", 1024, false},
28
+ {"ValidMiB", "500MiB", 500 * 1024 * 1024, false},
29
+ {"ValidTiB", "43TiB", 43 * 1024 * 1024 * 1024 * 1024, false},
30
+ {"ValidKB", "1KB", 1000, false},
31
+ {"ValidMB", "500MB", 500 * 1e6, false},
32
+ {"ValidTB", "43TB", 43 * 1e12, false},
33
+ {"InvalidUnit", "100GiL", 0, true},
34
+ {"InvalidNumber", "tenKiB", 0, true},
35
+ {"InvalidFormat", "1024 KiB", 1024 * 1024, false},
36
+ {"EmptyString", "", 0, true},
37
+ {"NoUnit", "12345", 12345, false},
38
+ {"Unlimited lower case", "unlimited", math.MaxInt64, false},
39
+ {"Unlimited unlimited upper case", "UNLIMITED", math.MaxInt64, false},
40
+ {"Nolimit lower case", "nolimit", math.MaxInt64, false},
41
+ {"Nolimit upper case", "NOLIMIT", math.MaxInt64, false},
42
+ }
43
+
44
+ for _, tc := range tests {
45
+ t.Run(tc.name, func(t *testing.T) {
46
+ result, err := parseResourceString(tc.input)
47
+ if (err != nil) != tc.err {
48
+ t.Errorf("parseResourceString(%s) expected error: %v, got: %v", tc.input, tc.err, err != nil)
49
+ }
50
+ if result != tc.expected {
51
+ t.Errorf("parseResourceString(%s) expected %d, got %d", tc.input, tc.expected, result)
52
+ }
53
+ })
54
+ }
55
+ }
platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig.go ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "bytes"
16
+ "fmt"
17
+ "io"
18
+ "reflect"
19
+ "strings"
20
+ "time"
21
+
22
+ "github.com/pkg/errors"
23
+ "github.com/sirupsen/logrus"
24
+ "gopkg.in/yaml.v3"
25
+
26
+ "github.com/weaviate/weaviate/usecases/config/runtime"
27
+ )
28
+
29
// WeaviateRuntimeConfig is the collection of all supported configs that are
// managed dynamically and can be overridden during runtime.
//
// Every field is a *runtime.DynamicValue; when an overrides file is parsed,
// a nil field means "no override present" and the value falls back to its
// default (see updateRuntimeConfig). The json/yaml tags must be
// lower_snake_case — this is enforced by the tests in runtimeconfig_test.go.
type WeaviateRuntimeConfig struct {
	MaximumAllowedCollectionsCount  *runtime.DynamicValue[int]           `json:"maximum_allowed_collections_count" yaml:"maximum_allowed_collections_count"`
	AutoschemaEnabled               *runtime.DynamicValue[bool]          `json:"autoschema_enabled" yaml:"autoschema_enabled"`
	AsyncReplicationDisabled        *runtime.DynamicValue[bool]          `json:"async_replication_disabled" yaml:"async_replication_disabled"`
	RevectorizeCheckDisabled        *runtime.DynamicValue[bool]          `json:"revectorize_check_disabled" yaml:"revectorize_check_disabled"`
	ReplicaMovementMinimumAsyncWait *runtime.DynamicValue[time.Duration] `json:"replica_movement_minimum_async_wait" yaml:"replica_movement_minimum_async_wait"`
	TenantActivityReadLogLevel      *runtime.DynamicValue[string]        `json:"tenant_activity_read_log_level" yaml:"tenant_activity_read_log_level"`
	TenantActivityWriteLogLevel     *runtime.DynamicValue[string]        `json:"tenant_activity_write_log_level" yaml:"tenant_activity_write_log_level"`
	QuerySlowLogEnabled             *runtime.DynamicValue[bool]          `json:"query_slow_log_enabled" yaml:"query_slow_log_enabled"`
	QuerySlowLogThreshold           *runtime.DynamicValue[time.Duration] `json:"query_slow_log_threshold" yaml:"query_slow_log_threshold"`
	InvertedSorterDisabled          *runtime.DynamicValue[bool]          `json:"inverted_sorter_disabled" yaml:"inverted_sorter_disabled"`
	UsageGCSBucket                  *runtime.DynamicValue[string]        `json:"usage_gcs_bucket" yaml:"usage_gcs_bucket"`
	UsageGCSPrefix                  *runtime.DynamicValue[string]        `json:"usage_gcs_prefix" yaml:"usage_gcs_prefix"`
	UsageS3Bucket                   *runtime.DynamicValue[string]        `json:"usage_s3_bucket" yaml:"usage_s3_bucket"`
	UsageS3Prefix                   *runtime.DynamicValue[string]        `json:"usage_s3_prefix" yaml:"usage_s3_prefix"`
	UsageScrapeInterval             *runtime.DynamicValue[time.Duration] `json:"usage_scrape_interval" yaml:"usage_scrape_interval"`
	UsageShardJitterInterval        *runtime.DynamicValue[time.Duration] `json:"usage_shard_jitter_interval" yaml:"usage_shard_jitter_interval"`
	UsagePolicyVersion              *runtime.DynamicValue[string]        `json:"usage_policy_version" yaml:"usage_policy_version"`
	UsageVerifyPermissions          *runtime.DynamicValue[bool]          `json:"usage_verify_permissions" yaml:"usage_verify_permissions"`

	// Experimental configs. Will be removed in the future.
	OIDCIssuer            *runtime.DynamicValue[string]   `json:"exp_oidc_issuer" yaml:"exp_oidc_issuer"`
	OIDCClientID          *runtime.DynamicValue[string]   `json:"exp_oidc_client_id" yaml:"exp_oidc_client_id"`
	OIDCSkipClientIDCheck *runtime.DynamicValue[bool]     `yaml:"exp_oidc_skip_client_id_check" json:"exp_oidc_skip_client_id_check"`
	OIDCUsernameClaim     *runtime.DynamicValue[string]   `yaml:"exp_oidc_username_claim" json:"exp_oidc_username_claim"`
	OIDCGroupsClaim       *runtime.DynamicValue[string]   `yaml:"exp_oidc_groups_claim" json:"exp_oidc_groups_claim"`
	OIDCScopes            *runtime.DynamicValue[[]string] `yaml:"exp_oidc_scopes" json:"exp_oidc_scopes"`
	OIDCCertificate       *runtime.DynamicValue[string]   `yaml:"exp_oidc_certificate" json:"exp_oidc_certificate"`
	DefaultQuantization   *runtime.DynamicValue[string]   `yaml:"default_quantization" json:"default_quantization"`
}
61
+
62
+ // ParseRuntimeConfig decode WeaviateRuntimeConfig from raw bytes of YAML.
63
+ func ParseRuntimeConfig(buf []byte) (*WeaviateRuntimeConfig, error) {
64
+ var conf WeaviateRuntimeConfig
65
+
66
+ dec := yaml.NewDecoder(bytes.NewReader(buf))
67
+
68
+ // To catch fields different than ones in the struct (say typo)
69
+ dec.KnownFields(true)
70
+
71
+ // Am empty runtime yaml file is still a valid file. So treating io.EOF as
72
+ // non-error case returns default values of config.
73
+ if err := dec.Decode(&conf); err != nil && !errors.Is(err, io.EOF) {
74
+ return nil, err
75
+ }
76
+ return &conf, nil
77
+ }
78
+
79
+ // UpdateConfig does in-place update of `source` config based on values available in
80
+ // `parsed` config.
81
+ func UpdateRuntimeConfig(log logrus.FieldLogger, source, parsed *WeaviateRuntimeConfig, hooks map[string]func() error) error {
82
+ if source == nil || parsed == nil {
83
+ return fmt.Errorf("source and parsed cannot be nil")
84
+ }
85
+
86
+ updateRuntimeConfig(log, reflect.ValueOf(*source), reflect.ValueOf(*parsed), hooks)
87
+ return nil
88
+ }
89
+
90
+ /*
91
+ Alright. `updateRuntimeConfig` needs some explanation.
92
+
93
+ We could have avoided using `reflection` all together, if we have written something like this.
94
+
95
+ func updateRuntimeConfig(source, parsed *WeaviateRuntimeConfig) error {
96
+ if parsed.MaximumAllowedCollectionsCount != nil {
97
+ source.MaximumAllowedCollectionsCount.SetValue(parsed.MaximumAllowedCollectionsCount.Get())
98
+ } else {
99
+ source.MaximumAllowedCollectionsCount.Reset()
100
+ }
101
+
102
+ if parsed.AsyncReplicationDisabled != nil {
103
+ source.AsyncReplicationDisabled.SetValue(parsed.AsyncReplicationDisabled.Get())
104
+ } else {
105
+ source.AsyncReplicationDisabled.Reset()
106
+ }
107
+
108
+ if parsed.AutoschemaEnabled != nil {
109
+ source.AutoschemaEnabled.SetValue(parsed.AutoschemaEnabled.Get())
110
+ } else {
111
+ source.AutoschemaEnabled.Reset()
112
+ }
113
+
114
+ return nil
115
+ }
116
+
117
+ But this approach has two serious drawbacks
118
1. Every time a new config is supported, this function gets more verbose, as we have to update it for every struct field in WeaviateRuntimeConfig.
2. The much bigger one is: what if a consumer added a struct field but failed to **update** this function? This was a serious concern, and more work for
consumers.
121
+
122
+ With this reflection method, we avoided that extra step from the consumer. This reflection approach is "logically" same as above implementation.
123
+ See "runtimeconfig_test.go" for more examples.
124
+ */
125
+
126
+ func updateRuntimeConfig(log logrus.FieldLogger, source, parsed reflect.Value, hooks map[string]func() error) {
127
+ // Basically we do following
128
+ //
129
+ // 1. Loop through all the `source` fields
130
+ // 2. Check if any of those fields exists in `parsed` (non-nil)
131
+ // 3. If parsed config doesn't contain the field from `source`, We reset source's field.
132
+ // so that it's default value takes preference.
133
+ // 4. If parsed config does contain the field from `source`, We update the value via `SetValue`.
134
+
135
+ logRecords := make([]updateLogRecord, 0)
136
+
137
+ for i := range source.NumField() {
138
+ sf := source.Field(i)
139
+ pf := parsed.Field(i)
140
+
141
+ r := updateLogRecord{
142
+ field: source.Type().Field(i).Name,
143
+ }
144
+
145
+ si := sf.Interface()
146
+ var pi any
147
+ if !pf.IsNil() {
148
+ pi = pf.Interface()
149
+ }
150
+
151
+ switch sv := si.(type) {
152
+ case *runtime.DynamicValue[int]:
153
+ r.oldV = sv.Get()
154
+ if pf.IsNil() {
155
+ // Means the config is removed
156
+ sv.Reset()
157
+ } else {
158
+ p := pi.(*runtime.DynamicValue[int])
159
+ sv.SetValue(p.Get())
160
+ }
161
+ r.newV = sv.Get()
162
+ case *runtime.DynamicValue[float64]:
163
+ r.oldV = sv.Get()
164
+ if pf.IsNil() {
165
+ // Means the config is removed
166
+ sv.Reset()
167
+ } else {
168
+ p := pi.(*runtime.DynamicValue[float64])
169
+ sv.SetValue(p.Get())
170
+ }
171
+ r.newV = sv.Get()
172
+ case *runtime.DynamicValue[bool]:
173
+ r.oldV = sv.Get()
174
+ if pf.IsNil() {
175
+ // Means the config is removed
176
+ sv.Reset()
177
+ } else {
178
+ p := pi.(*runtime.DynamicValue[bool])
179
+ sv.SetValue(p.Get())
180
+ }
181
+ r.newV = sv.Get()
182
+ case *runtime.DynamicValue[time.Duration]:
183
+ r.oldV = sv.Get()
184
+ if pf.IsNil() {
185
+ // Means the config is removed
186
+ sv.Reset()
187
+ } else {
188
+ p := pi.(*runtime.DynamicValue[time.Duration])
189
+ sv.SetValue(p.Get())
190
+ }
191
+ r.newV = sv.Get()
192
+ case *runtime.DynamicValue[string]:
193
+ r.oldV = sv.Get()
194
+ if pf.IsNil() {
195
+ // Means the config is removed
196
+ sv.Reset()
197
+ } else {
198
+ p := pi.(*runtime.DynamicValue[string])
199
+ sv.SetValue(p.Get())
200
+ }
201
+ r.newV = sv.Get()
202
+ case *runtime.DynamicValue[[]string]:
203
+ r.oldV = sv.Get()
204
+ if pf.IsNil() {
205
+ // Means the config is removed
206
+ sv.Reset()
207
+ } else {
208
+ p := pi.(*runtime.DynamicValue[[]string])
209
+ sv.SetValue(p.Get())
210
+ }
211
+ r.newV = sv.Get()
212
+ default:
213
+ panic(fmt.Sprintf("not recognized type: %#v, %#v", pi, si))
214
+ }
215
+
216
+ if !reflect.DeepEqual(r.newV, r.oldV) {
217
+ logRecords = append(logRecords, r)
218
+ }
219
+
220
+ }
221
+
222
+ // log the changes made as INFO for auditing.
223
+ for _, v := range logRecords {
224
+ log.WithFields(logrus.Fields{
225
+ "action": "runtime_overrides_changed",
226
+ "field": v.field,
227
+ "old_value": v.oldV,
228
+ "new_value": v.newV,
229
+ }).Infof("runtime overrides: config '%v' changed from '%v' to '%v'", v.field, v.oldV, v.newV)
230
+ }
231
+
232
+ for match, f := range hooks {
233
+ if matchUpdatedFields(match, logRecords) {
234
+ err := f()
235
+ if err != nil {
236
+ log.WithFields(logrus.Fields{
237
+ "action": "runtime_overrides_hooks",
238
+ "match": match,
239
+ }).Errorf("error calling runtime hooks for match %s, %v", match, err)
240
+ continue
241
+ }
242
+ log.WithFields(logrus.Fields{
243
+ "action": "runtime_overrides_hooks",
244
+ "match": match,
245
+ }).Infof("runtime overrides: hook ran for matching '%v' pattern", match)
246
+ }
247
+ }
248
+ }
249
+
250
// updateLogRecord is used to record changes during updating runtime config.
type updateLogRecord struct {
	field      string
	oldV, newV any
}

// matchUpdatedFields reports whether any recorded change has a field name
// containing the given match string.
func matchUpdatedFields(match string, records []updateLogRecord) bool {
	for i := range records {
		if strings.Contains(records[i].field, match) {
			return true
		}
	}
	return false
}
platform/dbops/binaries/weaviate-src/usecases/config/runtimeconfig_test.go ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package config
13
+
14
+ import (
15
+ "bytes"
16
+ "io"
17
+ "regexp"
18
+ "testing"
19
+ "time"
20
+
21
+ "github.com/go-jose/go-jose/v4/json"
22
+ "github.com/sirupsen/logrus"
23
+ "github.com/stretchr/testify/assert"
24
+ "github.com/stretchr/testify/require"
25
+ "gopkg.in/yaml.v3"
26
+
27
+ "github.com/weaviate/weaviate/usecases/config/runtime"
28
+ )
29
+
30
+ func TestParseRuntimeConfig(t *testing.T) {
31
+ // parser should fail if any unknown fields exist in the file
32
+ t.Run("parser should fail if any unknown fields exist in the file", func(t *testing.T) {
33
+ // rationale: Catch and fail early if any typo on the config file.
34
+
35
+ buf := []byte(`autoschema_enabled: true`)
36
+ cfg, err := ParseRuntimeConfig(buf)
37
+ require.NoError(t, err)
38
+ assert.Equal(t, true, cfg.AutoschemaEnabled.Get())
39
+
40
+ buf = []byte(`autoschema_enbaled: false`) // note: typo.
41
+ cfg, err = ParseRuntimeConfig(buf)
42
+ require.ErrorContains(t, err, "autoschema_enbaled") // should contain misspelled field
43
+ assert.Nil(t, cfg)
44
+ })
45
+
46
+ t.Run("YAML tag should be lower_snake_case", func(t *testing.T) {
47
+ var r WeaviateRuntimeConfig
48
+
49
+ jd, err := json.Marshal(r)
50
+ require.NoError(t, err)
51
+
52
+ var vv map[string]any
53
+ require.NoError(t, json.Unmarshal(jd, &vv))
54
+
55
+ for k := range vv {
56
+ // check if all the keys lower_snake_case.
57
+ assertConfigKey(t, k)
58
+ }
59
+ })
60
+
61
+ t.Run("JSON tag should be lower_snake_case in the runtime config", func(t *testing.T) {
62
+ var r WeaviateRuntimeConfig
63
+
64
+ yd, err := yaml.Marshal(r)
65
+ require.NoError(t, err)
66
+
67
+ var vv map[string]any
68
+ require.NoError(t, yaml.Unmarshal(yd, &vv))
69
+
70
+ for k := range vv {
71
+ // check if all the keys lower_snake_case.
72
+ assertConfigKey(t, k)
73
+ }
74
+ })
75
+ }
76
+
77
// TestUpdateRuntimeConfig exercises UpdateRuntimeConfig end-to-end: applying
// parsed overrides onto registered DynamicValues, resetting to defaults when
// an override is removed, surviving a Reset() on fields the registry doesn't
// know about, and emitting the expected audit log lines for every change.
// The exact log-string assertions depend on logrus' default text formatter.
func TestUpdateRuntimeConfig(t *testing.T) {
	log := logrus.New()
	log.SetOutput(io.Discard)

	t.Run("updating should reflect changes in registered configs", func(t *testing.T) {
		var (
			colCount                 runtime.DynamicValue[int]
			autoSchema               runtime.DynamicValue[bool]
			asyncRep                 runtime.DynamicValue[bool]
			readLogLevel             runtime.DynamicValue[string]
			writeLogLevel            runtime.DynamicValue[string]
			revectorizeCheckDisabled runtime.DynamicValue[bool]
			minFinWait               runtime.DynamicValue[time.Duration]
		)

		reg := &WeaviateRuntimeConfig{
			MaximumAllowedCollectionsCount:  &colCount,
			AutoschemaEnabled:               &autoSchema,
			AsyncReplicationDisabled:        &asyncRep,
			TenantActivityReadLogLevel:      &readLogLevel,
			TenantActivityWriteLogLevel:     &writeLogLevel,
			RevectorizeCheckDisabled:        &revectorizeCheckDisabled,
			ReplicaMovementMinimumAsyncWait: &minFinWait,
		}

		// parsed from yaml configs for example
		buf := []byte(`autoschema_enabled: true
maximum_allowed_collections_count: 13
replica_movement_minimum_async_wait: 10s`)
		parsed, err := ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// before update (zero values)
		assert.Equal(t, false, autoSchema.Get())
		assert.Equal(t, 0, colCount.Get())
		assert.Equal(t, 0*time.Second, minFinWait.Get())

		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))

		// after update (reflect from parsed values)
		assert.Equal(t, true, autoSchema.Get())
		assert.Equal(t, 13, colCount.Get())
		assert.Equal(t, 10*time.Second, minFinWait.Get())
	})

	t.Run("Add and remove workflow", func(t *testing.T) {
		// 1. We start with empty overrides and see it doesn't change the .Get() value of source configs.
		// 2. We add some overrides. Check .Get() value
		// 3. Remove the overrides. check .Get() value goes back to default

		source := &WeaviateRuntimeConfig{
			MaximumAllowedCollectionsCount: runtime.NewDynamicValue(10),
			AutoschemaEnabled:              runtime.NewDynamicValue(true),
			AsyncReplicationDisabled:       runtime.NewDynamicValue(true),
			TenantActivityReadLogLevel:     runtime.NewDynamicValue("INFO"),
			TenantActivityWriteLogLevel:    runtime.NewDynamicValue("INFO"),
			RevectorizeCheckDisabled:       runtime.NewDynamicValue(true),
		}

		// defaults as provided via NewDynamicValue
		assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get())
		assert.Equal(t, true, source.AutoschemaEnabled.Get())
		assert.Equal(t, true, source.AsyncReplicationDisabled.Get())
		assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get())
		assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get())
		assert.Equal(t, true, source.RevectorizeCheckDisabled.Get())

		// Empty Parsing
		buf := []byte("")
		parsed, err := ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// an empty overrides file leaves every parsed field nil
		assert.Nil(t, parsed.AsyncReplicationDisabled)
		assert.Nil(t, parsed.MaximumAllowedCollectionsCount)
		assert.Nil(t, parsed.AutoschemaEnabled)
		assert.Nil(t, parsed.TenantActivityReadLogLevel)
		assert.Nil(t, parsed.TenantActivityWriteLogLevel)
		assert.Nil(t, parsed.RevectorizeCheckDisabled)

		// updating with empty overrides must not change any default
		require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil))
		assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get())
		assert.Equal(t, true, source.AutoschemaEnabled.Get())
		assert.Equal(t, true, source.AsyncReplicationDisabled.Get())
		assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get())
		assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get())
		assert.Equal(t, true, source.RevectorizeCheckDisabled.Get())

		// Non-empty parsing
		buf = []byte(`autoschema_enabled: false
maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config
		parsed, err = ParseRuntimeConfig(buf)
		require.NoError(t, err)

		require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil))
		assert.Equal(t, 13, source.MaximumAllowedCollectionsCount.Get()) // changed
		assert.Equal(t, false, source.AutoschemaEnabled.Get())           // changed
		assert.Equal(t, true, source.AsyncReplicationDisabled.Get())
		assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get())
		assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get())
		assert.Equal(t, true, source.RevectorizeCheckDisabled.Get())

		// Empty parsing again. Should go back to default values
		buf = []byte("")
		parsed, err = ParseRuntimeConfig(buf)
		require.NoError(t, err)

		require.NoError(t, UpdateRuntimeConfig(log, source, parsed, nil))
		assert.Equal(t, 10, source.MaximumAllowedCollectionsCount.Get())
		assert.Equal(t, true, source.AutoschemaEnabled.Get())
		assert.Equal(t, true, source.AsyncReplicationDisabled.Get())
		assert.Equal(t, "INFO", source.TenantActivityReadLogLevel.Get())
		assert.Equal(t, "INFO", source.TenantActivityWriteLogLevel.Get())
		assert.Equal(t, true, source.RevectorizeCheckDisabled.Get())
	})

	t.Run("Reset() of non-exist config values in parsed yaml shouldn't panic", func(t *testing.T) {
		var (
			colCount   runtime.DynamicValue[int]
			autoSchema runtime.DynamicValue[bool]
			// leaving out `asyncRep` config
		)

		reg := &WeaviateRuntimeConfig{
			MaximumAllowedCollectionsCount: &colCount,
			AutoschemaEnabled:              &autoSchema,
			// leaving out `asyncRep` config
		}

		// parsed from yaml configs for example
		buf := []byte(`autoschema_enabled: true
maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config
		parsed, err := ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// before update (zero values)
		assert.Equal(t, false, autoSchema.Get())
		assert.Equal(t, 0, colCount.Get())

		require.NotPanics(t, func() { UpdateRuntimeConfig(log, reg, parsed, nil) })

		// after update (reflect from parsed values)
		assert.Equal(t, true, autoSchema.Get())
		assert.Equal(t, 13, colCount.Get())
	})

	t.Run("updating config should split out corresponding log lines", func(t *testing.T) {
		// uses a local logger writing into a buffer so the emitted audit
		// lines can be asserted against verbatim
		log := logrus.New()
		logs := bytes.Buffer{}
		log.SetOutput(&logs)

		var (
			colCount   = runtime.NewDynamicValue(7)
			autoSchema runtime.DynamicValue[bool]
		)

		reg := &WeaviateRuntimeConfig{
			MaximumAllowedCollectionsCount: colCount,
			AutoschemaEnabled:              &autoSchema,
		}

		// parsed from yaml configs for example
		buf := []byte(`autoschema_enabled: true
maximum_allowed_collections_count: 13`) // leaving out `asyncRep` config
		parsed, err := ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// before update (zero values)
		assert.Equal(t, false, autoSchema.Get())
		assert.Equal(t, 7, colCount.Get())

		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))
		assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '7' to '13'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=13 old_value=7`)
		assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'AutoschemaEnabled' changed from 'false' to 'true'" action=runtime_overrides_changed field=AutoschemaEnabled new_value=true old_value=false`)
		logs.Reset()

		// change configs
		buf = []byte(`autoschema_enabled: false
maximum_allowed_collections_count: 10`)
		parsed, err = ParseRuntimeConfig(buf)
		require.NoError(t, err)

		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))
		assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '13' to '10'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=10 old_value=13`)
		assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'AutoschemaEnabled' changed from 'true' to 'false'" action=runtime_overrides_changed field=AutoschemaEnabled new_value=false old_value=true`)
		logs.Reset()

		// remove configs (`maximum_allowed_collections_count`)
		buf = []byte(`autoschema_enabled: false`)
		parsed, err = ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// removal must be logged as a change back to the default (7)
		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))
		assert.Contains(t, logs.String(), `level=info msg="runtime overrides: config 'MaximumAllowedCollectionsCount' changed from '10' to '7'" action=runtime_overrides_changed field=MaximumAllowedCollectionsCount new_value=7 old_value=10`)
	})

	t.Run("updating priorities", func(t *testing.T) {
		// invariants:
		// 1. If field doesn't exist, should return default value
		// 2. If field exist, but removed next time, should return default value not the old value.

		var (
			colCount                 runtime.DynamicValue[int]
			autoSchema               runtime.DynamicValue[bool]
			asyncRep                 runtime.DynamicValue[bool]
			readLogLevel             runtime.DynamicValue[string]
			writeLogLevel            runtime.DynamicValue[string]
			revectorizeCheckDisabled runtime.DynamicValue[bool]
			minFinWait               runtime.DynamicValue[time.Duration]
		)

		reg := &WeaviateRuntimeConfig{
			MaximumAllowedCollectionsCount:  &colCount,
			AutoschemaEnabled:               &autoSchema,
			AsyncReplicationDisabled:        &asyncRep,
			TenantActivityReadLogLevel:      &readLogLevel,
			TenantActivityWriteLogLevel:     &writeLogLevel,
			RevectorizeCheckDisabled:        &revectorizeCheckDisabled,
			ReplicaMovementMinimumAsyncWait: &minFinWait,
		}

		// parsed from yaml configs for example
		buf := []byte(`autoschema_enabled: true
maximum_allowed_collections_count: 13
replica_movement_minimum_async_wait: 10s`)
		parsed, err := ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// before update (zero values)
		assert.Equal(t, false, autoSchema.Get())
		assert.Equal(t, 0, colCount.Get())
		assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file.
		assert.Equal(t, 0*time.Second, minFinWait.Get())

		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))

		// after update (reflect from parsed values)
		assert.Equal(t, true, autoSchema.Get())
		assert.Equal(t, 13, colCount.Get())
		assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value.
		assert.Equal(t, 10*time.Second, minFinWait.Get())

		// removing `maximum_allowed_collection_count` from config
		buf = []byte(`autoschema_enabled: false`)
		parsed, err = ParseRuntimeConfig(buf)
		require.NoError(t, err)

		// before update. Should have old values
		assert.Equal(t, true, autoSchema.Get())
		assert.Equal(t, 13, colCount.Get())
		assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value.

		require.NoError(t, UpdateRuntimeConfig(log, reg, parsed, nil))

		// after update.
		assert.Equal(t, false, autoSchema.Get())
		assert.Equal(t, 0, colCount.Get())     // this should still return `default` value. not old value
		assert.Equal(t, false, asyncRep.Get()) // this field doesn't exist in original config file, should return default value.
	})
}
335
+
336
// helpers

// lowerSnakeCaseRe matches standard lower_snake_case identifiers
// (e.g. "my_key", "usage_s3_bucket"). Compiled once at package scope instead
// of on every assertConfigKey call.
var lowerSnakeCaseRe = regexp.MustCompile(`^[a-z0-9]+(_[a-z0-9]+)*$`)

// assertConfigKey asserts that the given `yaml`/`json` key is standard
// `lower_snake_case` (e.g: not `UPPER_CASE`).
func assertConfigKey(t *testing.T, key string) {
	t.Helper()

	if !lowerSnakeCaseRe.MatchString(key) {
		t.Fatalf("given key %v is not lower snake case. The json/yaml tag for runtime config should be all lower snake case (e.g my_key, not MY_KEY)", key)
	}
}
platform/dbops/binaries/weaviate-src/usecases/connstate/manager.go ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package connstate
13
+
14
+ import (
15
+ "context"
16
+ "encoding/json"
17
+ "fmt"
18
+
19
+ "github.com/sirupsen/logrus"
20
+ )
21
+
22
// Manager can save and load a connector's internal state into a remote storage
type Manager struct {
	repo   Repo            // backing storage used to persist/load the state
	state  json.RawMessage // last known state, kept in memory; "{}" when freshly initialized
	logger logrus.FieldLogger
}
28
+
29
// Repo describes the dependencies of the connector state manager to an
// external storage
type Repo interface {
	// Save persists the given raw state blob.
	Save(ctx context.Context, state json.RawMessage) error
	// Load returns the previously persisted state; a nil message is
	// interpreted by the Manager as "nothing stored yet".
	Load(ctx context.Context) (json.RawMessage, error)
}
35
+
36
+ // NewManager for Connector State
37
+ func NewManager(repo Repo, logger logrus.FieldLogger) (*Manager, error) {
38
+ m := &Manager{repo: repo, logger: logger}
39
+ if err := m.loadOrInitialize(context.Background()); err != nil {
40
+ return nil, fmt.Errorf("could not load or initialize: %w", err)
41
+ }
42
+
43
+ return m, nil
44
+ }
45
+
46
// GetInitialState is only supposed to be used during initialization of the
// connector. It returns the state that was loaded (or freshly initialized
// to "{}") when the Manager was constructed.
func (m *Manager) GetInitialState() json.RawMessage {
	return m.state
}
51
+
52
// SetState sets the state from outside (i.e. from the connector), keeping it
// in memory and immediately persisting it to the configured Repo.
func (m *Manager) SetState(ctx context.Context, state json.RawMessage) error {
	m.state = state
	return m.save(ctx)
}
57
+
58
+ // func (l *etcdSchemaManager) SetStateConnector(stateConnector connector_state.Connector) {
59
+ // l.connectorStateSetter = stateConnector
60
+ // }
61
+
62
+ func (m *Manager) loadOrInitialize(ctx context.Context) error {
63
+ state, err := m.repo.Load(ctx)
64
+ if err != nil {
65
+ return fmt.Errorf("could not load connector state: %w", err)
66
+ }
67
+
68
+ if state == nil {
69
+ m.state = json.RawMessage([]byte("{}"))
70
+ return m.save(ctx)
71
+ }
72
+
73
+ m.state = state
74
+ return nil
75
+ }
76
+
77
+ func (m *Manager) save(ctx context.Context) error {
78
+ m.logger.
79
+ WithField("action", "connector_state_update").
80
+ WithField("configuration_store", "etcd").
81
+ Debug("saving updated connector state to configuration store")
82
+
83
+ err := m.repo.Save(ctx, m.state)
84
+ if err != nil {
85
+ return fmt.Errorf("could not save connector state: %w", err)
86
+ }
87
+
88
+ return nil
89
+ }
platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler.go ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package distributedtask
13
+
14
+ import (
15
+ "context"
16
+ "encoding/json"
17
+ "fmt"
18
+ "sort"
19
+
20
+ "github.com/go-openapi/strfmt"
21
+ "github.com/weaviate/weaviate/cluster/distributedtask"
22
+ "github.com/weaviate/weaviate/entities/models"
23
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
24
+ )
25
+
26
// Handler exposes read access to the cluster's distributed tasks, with every
// request checked against the configured authorizer first.
type Handler struct {
	authorizer  authorization.Authorizer
	tasksLister distributedtask.TasksLister
}
30
+
31
// NewHandler constructs a Handler from the given authorizer and task lister.
func NewHandler(authorizer authorization.Authorizer, taskLister distributedtask.TasksLister) *Handler {
	return &Handler{
		authorizer:  authorizer,
		tasksLister: taskLister,
	}
}
37
+
38
+ func (h *Handler) ListTasks(ctx context.Context, principal *models.Principal) (models.DistributedTasks, error) {
39
+ if err := h.authorizer.Authorize(ctx, principal, authorization.READ, authorization.Cluster()); err != nil {
40
+ return nil, err
41
+ }
42
+
43
+ tasksByNamespace, err := h.tasksLister.ListDistributedTasks(ctx)
44
+ if err != nil {
45
+ return nil, fmt.Errorf("list distributed tasks: %w", err)
46
+ }
47
+
48
+ resp := models.DistributedTasks{}
49
+ for namespace, tasks := range tasksByNamespace {
50
+ resp[namespace] = make([]models.DistributedTask, 0, len(tasks))
51
+ for _, task := range tasks {
52
+ var finishedNodes []string
53
+ for node := range task.FinishedNodes {
54
+ finishedNodes = append(finishedNodes, node)
55
+ }
56
+ // sort so it would be more deterministic and easier to test
57
+ sort.Strings(finishedNodes)
58
+
59
+ // Try to unmarshal the raw payload into a generic JSON object.
60
+ // If we introduce sensitive information to the payload, we can
61
+ // add another method to Provider to unmarshal the payload and strip all the sensitive data.
62
+ var payload map[string]interface{}
63
+ if err = json.Unmarshal(task.Payload, &payload); err != nil {
64
+ return nil, fmt.Errorf("unmarshal payload: %w", err)
65
+ }
66
+
67
+ resp[namespace] = append(resp[namespace], models.DistributedTask{
68
+ ID: task.ID,
69
+ Version: int64(task.Version),
70
+ Status: task.Status.String(),
71
+ Error: task.Error,
72
+ StartedAt: strfmt.DateTime(task.StartedAt),
73
+ FinishedAt: strfmt.DateTime(task.FinishedAt),
74
+ FinishedNodes: finishedNodes,
75
+ Payload: payload,
76
+ })
77
+ }
78
+ }
79
+
80
+ return resp, nil
81
+ }
platform/dbops/binaries/weaviate-src/usecases/distributedtask/handler_test.go ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package distributedtask
13
+
14
+ import (
15
+ "context"
16
+ "testing"
17
+ "time"
18
+
19
+ "github.com/go-openapi/strfmt"
20
+ "github.com/stretchr/testify/mock"
21
+ "github.com/stretchr/testify/require"
22
+ "github.com/weaviate/weaviate/cluster/distributedtask"
23
+ "github.com/weaviate/weaviate/entities/models"
24
+ "github.com/weaviate/weaviate/usecases/auth/authorization"
25
+ )
26
+
27
// TestHandler_ListTasks verifies that ListTasks authorizes the caller and
// converts the lister's internal task representation into the API model:
// FinishedNodes comes back as a sorted slice and the raw payload is decoded
// into a generic JSON object.
func TestHandler_ListTasks(t *testing.T) {
	var (
		authorizer = authorization.NewMockAuthorizer(t)
		now        = time.Now()

		namespace = "testNamespace"
		lister    = taskListerStub{
			items: map[string][]*distributedtask.Task{
				namespace: {
					{
						Namespace: namespace,
						TaskDescriptor: distributedtask.TaskDescriptor{
							ID:      "test-task-1",
							Version: 10,
						},
						Payload:    []byte(`{"hello": "world"}`),
						Status:     distributedtask.TaskStatusFailed,
						StartedAt:  now.Add(-time.Hour),
						FinishedAt: now,
						Error:      "server is on fire",
						FinishedNodes: map[string]bool{
							"node1": true,
							"node2": true,
						},
					},
				},
			},
		}
		h = NewHandler(authorizer, lister)
	)

	// The handler must request READ access to the cluster resource.
	authorizer.EXPECT().Authorize(mock.Anything, mock.Anything, authorization.READ, authorization.Cluster()).Return(nil)

	tasks, err := h.ListTasks(context.Background(), &models.Principal{})
	require.NoError(t, err)

	require.Equal(t, models.DistributedTasks{
		"testNamespace": []models.DistributedTask{
			{
				ID:      "test-task-1",
				Version: 10,
				Status:  "FAILED",
				Error:   "server is on fire",
				StartedAt:  strfmt.DateTime(now.Add(-time.Hour)),
				FinishedAt: strfmt.DateTime(now),
				// Map keys are returned sorted for determinism.
				FinishedNodes: []string{"node1", "node2"},
				Payload:       map[string]interface{}{"hello": "world"},
			},
		},
	}, tasks)
}
78
+
79
// taskListerStub is a canned distributedtask.TasksLister: it always returns
// the configured items with a nil error.
type taskListerStub struct {
	items map[string][]*distributedtask.Task
}

// ListDistributedTasks returns the stubbed task map unmodified.
func (t taskListerStub) ListDistributedTasks(ctx context.Context) (map[string][]*distributedtask.Task, error) {
	return t.items, nil
}
platform/dbops/binaries/weaviate-src/usecases/fakes/fake_cluster_state.go ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package fakes
13
+
14
+ import (
15
+ command "github.com/weaviate/weaviate/cluster/proto/api"
16
+ "github.com/weaviate/weaviate/usecases/cluster"
17
+ "github.com/weaviate/weaviate/usecases/cluster/mocks"
18
+ )
19
+
20
// FakeClusterState is a test double for the cluster state: it embeds a mock
// NodeSelector for host resolution and answers the remaining cluster-state
// queries with canned values (see the methods below).
type FakeClusterState struct {
	cluster.NodeSelector
	syncIgnored bool
	skipRepair  bool
}
25
+
26
+ func NewFakeClusterState(hosts ...string) *FakeClusterState {
27
+ if len(hosts) == 0 {
28
+ hosts = []string{"node-1"}
29
+ }
30
+
31
+ return &FakeClusterState{
32
+ NodeSelector: mocks.NewMockNodeSelector(hosts...),
33
+ }
34
+ }
35
+
36
// SchemaSyncIgnored reports the configured syncIgnored flag (false unless set).
func (f *FakeClusterState) SchemaSyncIgnored() bool {
	return f.syncIgnored
}

// SkipSchemaRepair reports the configured skipRepair flag (false unless set).
func (f *FakeClusterState) SkipSchemaRepair() bool {
	return f.skipRepair
}

// Hostnames returns the storage candidates of the embedded node selector.
func (f *FakeClusterState) Hostnames() []string {
	return f.StorageCandidates()
}

// AllNames returns the storage candidates of the embedded node selector.
func (f *FakeClusterState) AllNames() []string {
	return f.StorageCandidates()
}

// LocalName returns a fixed local node name.
// NOTE(review): this returns "node1" while NewFakeClusterState defaults the
// host list to "node-1" — confirm the mismatch is intentional.
func (f *FakeClusterState) LocalName() string {
	return "node1"
}

// NodeCount always reports a single node, regardless of how many hosts the
// fake was constructed with.
func (f *FakeClusterState) NodeCount() int {
	return 1
}

// ClusterHealthScore always returns 0.
func (f *FakeClusterState) ClusterHealthScore() int {
	return 0
}
63
+
64
// ResolveParentNodes is a no-op that resolves to no parent nodes and no error.
func (f *FakeClusterState) ResolveParentNodes(string, string,
) (map[string]string, error) {
	return nil, nil
}

// NodeHostname delegates to the embedded node selector.
func (f *FakeClusterState) NodeHostname(host string) (string, bool) {
	return f.NodeSelector.NodeHostname(host)
}

// Execute accepts any apply request and always succeeds without side effects.
func (f *FakeClusterState) Execute(cmd *command.ApplyRequest) error {
	return nil
}
platform/dbops/binaries/weaviate-src/usecases/fakes/fake_raft_address_resolver.go ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // _ _
2
+ // __ _____ __ ___ ___ __ _| |_ ___
3
+ // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
4
+ // \ V V / __/ (_| |\ V /| | (_| | || __/
5
+ // \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
6
+ //
7
+ // Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
8
+ //
9
+ // CONTACT: hello@weaviate.io
10
+ //
11
+
12
+ package fakes
13
+
14
// FakeRPCAddressResolver is a stub address resolver: every lookup yields the
// same preconfigured address/error pair.
type FakeRPCAddressResolver struct {
	addr string
	err  error
}

// NewFakeRPCAddressResolver creates a resolver that always answers with addr
// and err, regardless of the raft address queried.
func NewFakeRPCAddressResolver(addr string, err error) *FakeRPCAddressResolver {
	resolver := FakeRPCAddressResolver{
		addr: addr,
		err:  err,
	}
	return &resolver
}

// Address ignores raftAddress and returns the fixed address/error pair.
func (f *FakeRPCAddressResolver) Address(raftAddress string) (string, error) {
	return f.addr, f.err
}